diff --git a/BUILDING.md b/BUILDING.md
index d32692fe5b6..159b8440f1e 100644
--- a/BUILDING.md
+++ b/BUILDING.md
@@ -59,12 +59,12 @@ Building from source
On first setup, you may need to run `$ mvn install -DskipTests`
to install the local jars. This is a side-effect of multi-module maven projects
-To re-generate the antlr based files:
+To re-generate the antlr based files:
`$ mvn process-sources`
-To build the jars and the assembly tarball:
+To build the jars and the assembly tarball:
`$ mvn package`
-and optionally, to just skip all the tests and build the jars:
+and optionally, to just skip all the tests and build the jars:
`$ mvn package -DskipTests`
Note: javadocs are generated in target/apidocs
@@ -79,7 +79,7 @@ Phoenix, even within the same HBase minor release.
By default, Phoenix will be built for the latest known patch level of the latest HBase 2.x
minor release that Phoenix supports.
-You can specify the targeted HBase minor release by setting the `hbase.profile` system property for
+You can specify the targeted HBase minor release by setting the `hbase.profile` system property for
maven.
You can also specify the exact HBase release to build Phoenix with by additionally
@@ -102,7 +102,7 @@ Use the m2e eclipse plugin and do Import->Maven Project and just pick the root '
Running the tests
-----------------
-All Unit Tests
+All Unit Tests
`$ mvn clean test`
All Unit Tests and Integration tests (takes a few hours)
@@ -133,5 +133,5 @@ as well as each of the subprojects. (not every project has all reports)
Generate Apache Web Site
------------------------
-checkout https://svn.apache.org/repos/asf/phoenix
+checkout https://svn.apache.org/repos/asf/phoenix
`$ build.sh`
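
(For reference, a minimal sketch of the build flow the BUILDING.md hunks above describe, combining the documented `mvn package -DskipTests` invocation with the `hbase.profile` system property; the profile value `2.5` below is illustrative and not taken from this patch.)

```sh
# Install module artifacts locally first (needed once for multi-module builds)
mvn install -DskipTests

# Build the jars and the assembly tarball for a chosen HBase minor release,
# skipping tests; "2.5" is only an example profile value
mvn clean package -DskipTests -Dhbase.profile=2.5
```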
diff --git a/Jenkinsfile b/Jenkinsfile
index 2882405ed82..c03afad1a14 100644
--- a/Jenkinsfile
+++ b/Jenkinsfile
@@ -116,4 +116,4 @@ pipeline {
}
}
}
-}
\ No newline at end of file
+}
diff --git a/Jenkinsfile.yetus b/Jenkinsfile.yetus
index c70dc991e02..a0e9c43f654 100644
--- a/Jenkinsfile.yetus
+++ b/Jenkinsfile.yetus
@@ -55,4 +55,4 @@ pipeline {
}
}
}
-}
\ No newline at end of file
+}
diff --git a/README.md b/README.md
index 6f886e16e78..f6d499101ac 100644
--- a/README.md
+++ b/README.md
@@ -19,4 +19,4 @@ limitations under the License.
[Apache Phoenix](http://phoenix.apache.org/) is a SQL skin over HBase delivered as a client-embedded JDBC driver targeting low latency queries over HBase data. Visit the Apache Phoenix website [here](http://phoenix.apache.org/).
-Copyright ©2014 [Apache Software Foundation](http://www.apache.org/). All Rights Reserved.
+Copyright ©2014 [Apache Software Foundation](http://www.apache.org/). All Rights Reserved.
diff --git a/bin/end2endTest.py b/bin/end2endTest.py
index 7de0a0423fd..b3b119fa64c 100755
--- a/bin/end2endTest.py
+++ b/bin/end2endTest.py
@@ -21,7 +21,7 @@
# !!! PLEASE READ !!!
# !!! Do NOT run the script against a production cluster because it wipes out
-# !!! existing data of the cluster
+# !!! existing data of the cluster
from __future__ import print_function
import os
diff --git a/bin/pherf-standalone.py b/bin/pherf-standalone.py
index 14f5500fb15..62e1acb582d 100755
--- a/bin/pherf-standalone.py
+++ b/bin/pherf-standalone.py
@@ -40,6 +40,6 @@
phoenix_utils.phoenix_pherf_jar + \
'" -Dlog4j2.configurationFile=file:' + \
os.path.join(phoenix_utils.current_dir, "log4j2.properties") + \
- " org.apache.phoenix.pherf.Pherf " + args
+ " org.apache.phoenix.pherf.Pherf " + args
os.execl("/bin/sh", "/bin/sh", "-c", java_cmd)
diff --git a/bin/phoenix_utils.py b/bin/phoenix_utils.py
index 48e7a42d5ab..4cb3182f025 100755
--- a/bin/phoenix_utils.py
+++ b/bin/phoenix_utils.py
@@ -33,14 +33,14 @@ def find(pattern, classPaths):
# remove * if it's at the end of path
if ((path is not None) and (len(path) > 0) and (path[-1] == '*')) :
path = path[:-1]
-
+
for root, dirs, files in os.walk(path):
# sort the file names so *-client always precedes *-thin-client
files.sort()
for name in files:
if fnmatch.fnmatch(name, pattern):
return os.path.join(root, name)
-
+
return ""
def tryDecode(input):
diff --git a/bin/psql.py b/bin/psql.py
index 03d76bc8309..2a82cf68ba8 100755
--- a/bin/psql.py
+++ b/bin/psql.py
@@ -36,7 +36,7 @@
os.pathsep + phoenix_utils.logging_jar + \
os.pathsep + phoenix_utils.phoenix_client_embedded_jar + '" -Dlog4j2.configurationFile=file:' + \
os.path.join(phoenix_utils.current_dir, "log4j2.properties") + \
- " org.apache.phoenix.util.PhoenixRuntime " + args
+ " org.apache.phoenix.util.PhoenixRuntime " + args
print(java_cmd)
diff --git a/bin/readme.txt b/bin/readme.txt
index e9c52439da2..43499e01091 100644
--- a/bin/readme.txt
+++ b/bin/readme.txt
@@ -3,10 +3,10 @@ SqlLine
https://github.com/julianhyde/sqlline
Execute SQL from command line. Sqlline manual is available at https://julianhyde.github.io/sqlline/manual.html
-
- Usage:
- $ sqlline.py
- Example:
+
+ Usage:
+ $ sqlline.py
+ Example:
$ sqlline.py localhost
$ sqlline.py localhost /STOCK_SYMBOL.sql
@@ -47,4 +47,3 @@ Usage: hadoop jar phoenix-[version]-mapreduce.jar
-s,--schema Phoenix schema name (optional)
-t,--table Phoenix table name (mandatory)
-z,--zookeeper Zookeeper quorum to connect to (optional)
-
diff --git a/bin/traceserver.py b/bin/traceserver.py
index 706cbd4421c..68522b705e3 100755
--- a/bin/traceserver.py
+++ b/bin/traceserver.py
@@ -123,7 +123,7 @@
phoenix_utils.phoenix_traceserver_jar + os.pathsep + phoenix_utils.slf4j_backend_jar + os.pathsep + \
phoenix_utils.logging_jar + os.pathsep + \
phoenix_utils.phoenix_client_embedded_jar + os.pathsep + phoenix_utils.phoenix_queryserver_jar + \
-
+
" -Dproc_phoenixtraceserver" + \
" -Dlog4j2.configurationFile=file:" + os.path.join(phoenix_utils.current_dir, "log4j2.properties") + \
" -Dpsql.root.logger=%(root_logger)s" + \
diff --git a/dev/PhoenixCodeTemplate.xml b/dev/PhoenixCodeTemplate.xml
index 318b30d9bff..3ad1dcb285f 100644
--- a/dev/PhoenixCodeTemplate.xml
+++ b/dev/PhoenixCodeTemplate.xml
@@ -1,312 +1,418 @@
diff --git a/dev/cache-apache-project-artifact.sh b/dev/cache-apache-project-artifact.sh
index 2b1e922fc59..58433661b93 100755
--- a/dev/cache-apache-project-artifact.sh
+++ b/dev/cache-apache-project-artifact.sh
@@ -138,4 +138,4 @@ echo "moving artifact into place at '${target}'"
mv "${working_dir}/artifact" "${target}.copying"
# attempt atomic move
mv "${target}.copying" "${target}"
-echo "all done!"
\ No newline at end of file
+echo "all done!"
diff --git a/dev/create-release/release-util.sh b/dev/create-release/release-util.sh
index af3a6556499..211fc4f813e 100755
--- a/dev/create-release/release-util.sh
+++ b/dev/create-release/release-util.sh
@@ -771,4 +771,4 @@ function rebuild_hbase_for_omid() {
function rebuild_hbase_locally() {
local hbase_version="$1"
MAVEN_SETTINGS_FILE="$MAVEN_SETTINGS_FILE" "$SELF"/rebuild_hbase.sh "$hbase_version"
-}
\ No newline at end of file
+}
diff --git a/dev/jenkinsEnv.sh b/dev/jenkinsEnv.sh
index 2717cb7ed6b..fc5839db887 100755
--- a/dev/jenkinsEnv.sh
+++ b/dev/jenkinsEnv.sh
@@ -28,4 +28,3 @@ export PATH=$PATH:$JAVA_HOME/bin:$ANT_HOME/bin:$MAVEN_HOME/bin
export MAVEN_OPTS="${MAVEN_OPTS:-"-Xmx3100M -XX:-UsePerfData -XX:MaxPermSize=256m"}"
ulimit -n
-
diff --git a/dev/misc_utils/README.md b/dev/misc_utils/README.md
index f6946e03346..b11fd0abeab 100644
--- a/dev/misc_utils/README.md
+++ b/dev/misc_utils/README.md
@@ -72,11 +72,11 @@ The script also requires below inputs:
Example of script execution:
```
-$ python3 dev/misc_utils/git_jira_fix_version_check.py
+$ python3 dev/misc_utils/git_jira_fix_version_check.py
JIRA Project Name (e.g PHOENIX / OMID / TEPHRA etc): PHOENIX
First commit hash to start excluding commits from history: a2adf5e572c5a4bcccee7f8ac43bad6b84293ec6
Fix Version: 4.16.0
-Jira server url (default: https://issues.apache.org/jira):
+Jira server url (default: https://issues.apache.org/jira):
Path of project's working dir with release branch checked-in: /Users/{USER}/Documents/phoenix
Check git status output and verify expected branch
@@ -114,5 +114,3 @@ Completed diff: ##############################################
```
-
-
diff --git a/dev/rebuild_hbase.sh b/dev/rebuild_hbase.sh
index 6e022082414..2fe927c4616 100755
--- a/dev/rebuild_hbase.sh
+++ b/dev/rebuild_hbase.sh
@@ -73,4 +73,3 @@ cd hbase-$HBASE_VERSION
echo mvn ${SETTINGS[@]} clean install -Dhadoop.profile=3.0 -DskipTests -B $LOCALREPO
mvn ${SETTINGS[@]} clean install -Dhadoop.profile=3.0 -DskipTests -B $LOCALREPO
cd ${STARTDIR}
-
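
(As context for the two hunks above: `release-util.sh` invokes `dev/rebuild_hbase.sh` with an HBase version, and `rebuild_hbase.sh` then runs `mvn clean install -Dhadoop.profile=3.0 -DskipTests` inside the unpacked `hbase-$HBASE_VERSION` directory. A minimal sketch of a direct invocation follows; the version number is illustrative.)

```sh
# Rebuild a given HBase release against Hadoop 3 and install it into the local
# Maven repository so Phoenix can be built against it; the version is an example
dev/rebuild_hbase.sh 2.5.8
```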
diff --git a/dev/smart-apply-patch.sh b/dev/smart-apply-patch.sh
index 0b69eabaf89..26dff4b4721 100755
--- a/dev/smart-apply-patch.sh
+++ b/dev/smart-apply-patch.sh
@@ -52,7 +52,7 @@ if $PATCH -p0 -E --dry-run < $PATCH_FILE 2>&1 > $TMP; then
# correct place to put those files.
# NOTE 2014/07/17:
-# Temporarily disabling below check since our jenkins boxes seems to be not defaulting to bash
+# Temporarily disabling below check since our jenkins boxes seems to be not defaulting to bash
# causing below checks to fail. Once it is fixed, we can revert the commit and enable this again.
# TMP2=/tmp/tmp.paths.2.$$
diff --git a/dev/test-patch.sh b/dev/test-patch.sh
index 62a6cd82d20..7fc0b791168 100755
--- a/dev/test-patch.sh
+++ b/dev/test-patch.sh
@@ -19,7 +19,7 @@
set -x
-### Setup some variables.
+### Setup some variables.
### GIT_COMMIT and BUILD_URL are set by Hudson if it is run by patch process
### Read variables from properties file
bindir=$(dirname $0)
@@ -160,7 +160,7 @@ parseArgs() {
### Check if $PATCH_DIR exists. If it does not exist, create a new directory
if [[ ! -e "$PATCH_DIR" ]] ; then
mkdir "$PATCH_DIR"
- if [[ $? == 0 ]] ; then
+ if [[ $? == 0 ]] ; then
echo "$PATCH_DIR has been created"
else
echo "Unable to create $PATCH_DIR"
@@ -296,7 +296,7 @@ setup () {
VERSION=${GIT_COMMIT}_${defect}_PATCH-${patchNum}
findBranchNameFromPatchName ${relativePatchURL}
checkoutBranch
- JIRA_COMMENT="Here are the results of testing the latest attachment
+ JIRA_COMMENT="Here are the results of testing the latest attachment
$patchURL
against ${BRANCH_NAME} branch at commit ${GIT_COMMIT}.
ATTACHMENT ID: ${ATTACHMENT_ID}"
@@ -485,7 +485,7 @@ applyPatch () {
echo "======================================================================"
echo ""
echo ""
-
+
export PATCH
$BASEDIR/dev/smart-apply-patch.sh $PATCH_DIR/patch
if [[ $? != 0 ]] ; then
@@ -786,7 +786,7 @@ checkFindbugsWarnings () {
echo "======================================================================"
echo ""
echo ""
- echo "$MVN clean package findbugs:findbugs -D${PROJECT_NAME}PatchProcess"
+ echo "$MVN clean package findbugs:findbugs -D${PROJECT_NAME}PatchProcess"
export MAVEN_OPTS="${MAVEN_OPTS}"
$MVN clean package findbugs:findbugs -D${PROJECT_NAME}PatchProcess -DskipTests < /dev/null
@@ -797,7 +797,7 @@ checkFindbugsWarnings () {
return 1
fi
- collectFindbugsReports patch $BASEDIR $PATCH_DIR
+ collectFindbugsReports patch $BASEDIR $PATCH_DIR
#this files are generated by collectFindbugsReports() named with its first argument
patch_xml=$PATCH_DIR/patchFindbugsWarnings.xml
trunk_xml=$PATCH_DIR/trunkFindbugsWarnings.xml
@@ -884,7 +884,7 @@ runTests () {
if [[ $? != 0 ]] ; then
### Find and format names of failed tests
failed_tests=`find . -name 'TEST*.xml' | xargs $GREP -l -E " /dev/null
@@ -1004,7 +1004,7 @@ $JIRA_COMMENT_FOOTER"
-$comment"
+$comment"
if [[ $JENKINS == "true" ]] ; then
echo ""
@@ -1099,7 +1099,7 @@ checkLineLengths
(( RESULT = RESULT + $? ))
# checkSiteXml
# (( RESULT = RESULT + $?))
-### Do not call these when run by a developer
+### Do not call these when run by a developer
if [[ $JENKINS == "true" ]] ; then
runTests
(( RESULT = RESULT + $? ))
diff --git a/phoenix-assembly/pom.xml b/phoenix-assembly/pom.xml
index 0ff54a34ff3..2271018980f 100644
--- a/phoenix-assembly/pom.xml
+++ b/phoenix-assembly/pom.xml
@@ -15,9 +15,7 @@
See the License for the specific language governing permissions and
limitations under the License.
-->
-
+
4.0.0
org.apache.phoenix
@@ -25,9 +23,9 @@
5.3.0-SNAPSHOT
phoenix-assembly
+ pom
Phoenix Assembly
Assemble Phoenix artifacts
- pom
true
@@ -36,26 +34,59 @@
${project.parent.basedir}
+
+
+
+ org.apache.phoenix
+ phoenix-server-${hbase.suffix}
+
+
+ org.apache.phoenix
+ phoenix-mapreduce-byo-shaded-hbase-${hbase.suffix}
+
+
+ org.apache.phoenix
+ phoenix-client-embedded-${hbase.suffix}
+
+
+ org.apache.phoenix
+ phoenix-client-lite-${hbase.suffix}
+
+
+ org.apache.phoenix
+ phoenix-pherf
+
+
+ org.apache.phoenix
+ phoenix-tracing-webapp
+
+
+ sqlline
+ sqlline
+ ${sqlline.version}
+ jar-with-dependencies
+
+
+
+
- exec-maven-plugin
org.codehaus.mojo
+ exec-maven-plugin
embedded client without version
- compile
exec
+ compile
ln
${project.basedir}/../phoenix-client-parent/phoenix-client-embedded/target
-fnsv
-
- phoenix-client-embedded-${hbase.suffix}-${project.version}.jar
-
+ phoenix-client-embedded-${hbase.suffix}-${project.version}.jar
phoenix-client-embedded-${hbase.suffix}.jar
@@ -65,18 +96,16 @@
lite client without version
- compile
exec
+ compile
ln
${project.basedir}/../phoenix-client-parent/phoenix-client-lite/target
-fnsv
-
- phoenix-client-lite-${hbase.suffix}-${project.version}.jar
-
+ phoenix-client-lite-${hbase.suffix}-${project.version}.jar
phoenix-client-lite-${hbase.suffix}.jar
@@ -86,18 +115,16 @@
server without version
- compile
exec
+ compile
ln
${project.basedir}/../phoenix-server/target
-fnsv
-
- phoenix-server-${hbase.suffix}-${project.version}.jar
-
+ phoenix-server-${hbase.suffix}-${project.version}.jar
phoenix-server-${hbase.suffix}.jar
@@ -107,18 +134,16 @@
mapreduce without version
- compile
exec
+ compile
ln
${project.basedir}/../phoenix-mapreduce-byo-shaded-hbase/target
-fnsv
-
- phoenix-mapreduce-byo-shaded-hbase-${hbase.suffix}-${project.version}.jar
-
+ phoenix-mapreduce-byo-shaded-hbase-${hbase.suffix}-${project.version}.jar
phoenix-mapreduce-byo-shaded-hbase-${hbase.suffix}.jar
@@ -128,21 +153,17 @@
pherf without version
- compile
exec
+ compile
ln
${project.basedir}/../phoenix-pherf/target
-fnsv
-
- phoenix-pherf-${project.version}.jar
-
-
- phoenix-pherf.jar
-
+ phoenix-pherf-${project.version}.jar
+ phoenix-pherf.jar
@@ -155,8 +176,8 @@
default-jar
- none
+ none
@@ -165,12 +186,12 @@
package-to-tar
- package
single
+ package
- phoenix-${hbase.suffix}-${project.version}-bin
+ phoenix-${hbase.suffix}-${project.version}-bin
false
gnu
false
@@ -185,66 +206,9 @@
-
-
-
- org.apache.phoenix
- phoenix-server-${hbase.suffix}
-
-
- org.apache.phoenix
- phoenix-mapreduce-byo-shaded-hbase-${hbase.suffix}
-
-
- org.apache.phoenix
- phoenix-client-embedded-${hbase.suffix}
-
-
- org.apache.phoenix
- phoenix-client-lite-${hbase.suffix}
-
-
- org.apache.phoenix
- phoenix-pherf
-
-
- org.apache.phoenix
- phoenix-tracing-webapp
-
-
- sqlline
- sqlline
- ${sqlline.version}
- jar-with-dependencies
-
-
-
-
coverage
-
-
-
- org.jacoco
- jacoco-maven-plugin
-
-
- report-aggregate
-
- report-aggregate
-
- verify
-
- ${jacocoReportDir}
- ${project.build.sourceEncoding}
- ${project.reporting.outputEncoding}
-
-
-
-
-
-
org.apache.phoenix
@@ -282,6 +246,28 @@
${project.version}
+
+
+
+ org.jacoco
+ jacoco-maven-plugin
+
+
+ report-aggregate
+
+ report-aggregate
+
+ verify
+
+ ${jacocoReportDir}
+ ${project.build.sourceEncoding}
+ ${project.reporting.outputEncoding}
+
+
+
+
+
+
diff --git a/phoenix-assembly/src/build/components/all-common-dependencies.xml b/phoenix-assembly/src/build/components/all-common-dependencies.xml
index 4a5fd9bf865..1720212b1fa 100644
--- a/phoenix-assembly/src/build/components/all-common-dependencies.xml
+++ b/phoenix-assembly/src/build/components/all-common-dependencies.xml
@@ -31,4 +31,4 @@
-
\ No newline at end of file
+
diff --git a/phoenix-assembly/src/build/package-to-tar-all.xml b/phoenix-assembly/src/build/package-to-tar-all.xml
index 9683ea714b1..9dd292842a2 100644
--- a/phoenix-assembly/src/build/package-to-tar-all.xml
+++ b/phoenix-assembly/src/build/package-to-tar-all.xml
@@ -36,4 +36,4 @@
src/build/components/all-common-files.xml
src/build/components/all-common-dependencies.xml
-
\ No newline at end of file
+
diff --git a/phoenix-client-parent/phoenix-client-embedded/pom.xml b/phoenix-client-parent/phoenix-client-embedded/pom.xml
index 3a6cadce0e3..b61287a6b97 100644
--- a/phoenix-client-parent/phoenix-client-embedded/pom.xml
+++ b/phoenix-client-parent/phoenix-client-embedded/pom.xml
@@ -15,9 +15,7 @@
See the License for the specific language governing permissions and
limitations under the License.
-->
-
+
4.0.0
org.apache.phoenix
@@ -26,6 +24,8 @@
phoenix-client-embedded-${hbase.suffix}
+
+ jar
Phoenix Client Embedded
Phoenix Client without logging implementation
@@ -33,41 +33,6 @@
${project.basedir}/../..
- jar
-
-
-
-
- org.apache.maven.plugins
- maven-shade-plugin
-
-
- embedded-shaded
- package
-
- shade
-
-
- true
- false
- ${shadeSources}
- ${basedir}/target/pom.xml
-
-
- *:*
-
-
- org.apache.phoenix:phoenix-client
- xom:xom
-
-
-
-
-
-
-
-
-
@@ -103,38 +68,71 @@
org.eclipse.jetty
jetty-server
- provided
${jetty.version}
+ provided
org.eclipse.jetty
jetty-util
- provided
${jetty.version}
+ provided
org.eclipse.jetty
jetty-util-ajax
- provided
${jetty.version}
+ provided
org.eclipse.jetty
jetty-servlet
- provided
${jetty.version}
+ provided
org.eclipse.jetty
jetty-webapp
- provided
${jetty.version}
+ provided
javax.servlet
javax.servlet-api
- provided
${javax.servlet-api.version}
+ provided
+
+
+
+
+ org.apache.maven.plugins
+ maven-shade-plugin
+
+
+ embedded-shaded
+
+ shade
+
+ package
+
+ true
+ false
+ ${shadeSources}
+ ${basedir}/target/pom.xml
+
+
+ *:*
+
+
+ org.apache.phoenix:phoenix-client
+ xom:xom
+
+
+
+
+
+
+
+
diff --git a/phoenix-client-parent/phoenix-client-lite/pom.xml b/phoenix-client-parent/phoenix-client-lite/pom.xml
index 53a85e1f3b5..25cd5f2a004 100644
--- a/phoenix-client-parent/phoenix-client-lite/pom.xml
+++ b/phoenix-client-parent/phoenix-client-lite/pom.xml
@@ -15,9 +15,7 @@
See the License for the specific language governing permissions and
limitations under the License.
-->
-
+
4.0.0
org.apache.phoenix
@@ -26,6 +24,8 @@
phoenix-client-lite-${hbase.suffix}
+
+ jar
Phoenix Client Lite
Phoenix Client without server-side code and dependencies
@@ -33,41 +33,6 @@
${project.basedir}/../..
- jar
-
-
-
-
- org.apache.maven.plugins
- maven-shade-plugin
-
-
- lite-shaded
- package
-
- shade
-
-
- true
- false
- ${shadeSources}
- ${basedir}/target/pom.xml
-
-
- *:*
-
-
- org.apache.phoenix:phoenix-client
- xom:xom
-
-
-
-
-
-
-
-
-
@@ -103,38 +68,71 @@
org.eclipse.jetty
jetty-server
- provided
${jetty.version}
+ provided
org.eclipse.jetty
jetty-util
- provided
${jetty.version}
+ provided
org.eclipse.jetty
jetty-util-ajax
- provided
${jetty.version}
+ provided
org.eclipse.jetty
jetty-servlet
- provided
${jetty.version}
+ provided
org.eclipse.jetty
jetty-webapp
- provided
${jetty.version}
+ provided
javax.servlet
javax.servlet-api
- provided
${javax.servlet-api.version}
+ provided
+
+
+
+
+ org.apache.maven.plugins
+ maven-shade-plugin
+
+
+ lite-shaded
+
+ shade
+
+ package
+
+ true
+ false
+ ${shadeSources}
+ ${basedir}/target/pom.xml
+
+
+ *:*
+
+
+ org.apache.phoenix:phoenix-client
+ xom:xom
+
+
+
+
+
+
+
+
diff --git a/phoenix-client-parent/pom.xml b/phoenix-client-parent/pom.xml
index 0b3f15b8dfd..574fbac0b0e 100644
--- a/phoenix-client-parent/pom.xml
+++ b/phoenix-client-parent/pom.xml
@@ -15,9 +15,7 @@
See the License for the specific language governing permissions and
limitations under the License.
-->
-
+
4.0.0
org.apache.phoenix
@@ -25,9 +23,9 @@
5.3.0-SNAPSHOT
phoenix-client-parent
+ pom
Phoenix Client Parent
Common configuration for the Phoenix Client Variants
- pom
true
@@ -66,7 +64,7 @@
*
-
+
org.apache.maven.plugins
@@ -88,7 +86,7 @@
README*
-
+
org.apache.hadoop:hadoop-yarn-common
org/apache/hadoop/yarn/factories/package-info.class
@@ -115,27 +113,20 @@
-
-
+
+
csv-bulk-load-config.properties
-
- ${project.basedir}/../config/csv-bulk-load-config.properties
-
+ ${project.basedir}/../config/csv-bulk-load-config.properties
-
+
README.md
${project.basedir}/../README.md
-
+
LICENSE.txt
${project.basedir}/../LICENSE
-
+
NOTICE
${project.basedir}/../NOTICE
diff --git a/phoenix-core-client/pom.xml b/phoenix-core-client/pom.xml
index ad65b74f39a..239499efff9 100644
--- a/phoenix-core-client/pom.xml
+++ b/phoenix-core-client/pom.xml
@@ -15,8 +15,7 @@
See the License for the specific language governing permissions and
limitations under the License.
-->
-
+
4.0.0
@@ -28,189 +27,6 @@
Phoenix Core Client
Core Phoenix Client codebase
-
-
-
-
- org.apache.maven.plugins
- maven-site-plugin
-
-
-
- org.apache.maven.plugins
- maven-enforcer-plugin
-
-
-
-
- import java.util.regex.Pattern;
- import java.lang.Integer;
-
- versionPattern = Pattern.compile("(\\d+)\\.(\\d+)\\.(\\d+)[^.]*$");
- versionMatcher = versionPattern.matcher("${hbase.version}");
- versionMatcher.find();
-
- hbaseMajor = Integer.parseInt(versionMatcher.group(1));
- hbaseMinor = Integer.parseInt(versionMatcher.group(2));
- hbasePatch = Integer.parseInt(versionMatcher.group(3));
-
- hbaseMajor == 2 && (
- ("${hbase.compat.version}".equals("2.4.1")
- && hbaseMinor == 4
- && hbasePatch >=1)
- || ("${hbase.compat.version}".equals("2.5.0")
- && hbaseMinor == 5
- && hbasePatch >=0)
- || ("${hbase.compat.version}".equals("2.5.4")
- && hbaseMinor == 5
- && hbasePatch >=4)
- || ("${hbase.compat.version}".equals("2.6.0")
- && hbaseMinor == 6
- && hbasePatch >=0)
- )
-
-
-
-
-
-
- check-hbase-compatibility
- validate
-
- enforce
-
-
-
-
-
- org.codehaus.mojo
- build-helper-maven-plugin
-
-
- add-source
- generate-sources
-
- add-source
-
-
-
- ${antlr-output.dir}
- ${antlr-input.dir}
-
-
-
-
-
-
-
- org.antlr
- antlr3-maven-plugin
-
-
-
- antlr
-
-
-
-
- ${antlr-output.dir}/org/apache/phoenix/parse
-
-
-
-
- org.apache.maven.plugins
- maven-eclipse-plugin
-
-
- org.jamon.project.templateBuilder
- org.eclipse.jdt.core.javabuilder
-
-
-
-
- org.apache.maven.plugins
- maven-dependency-plugin
-
-
-
- create-phoenix-generated-classpath
-
- build-classpath
-
-
- ${project.build.directory}/cached_classpath.txt
-
-
-
-
- copy-for-sqlline
-
- copy
-
-
-
-
- org.apache.logging.log4j
- log4j-api
-
-
- org.apache.logging.log4j
- log4j-core
-
-
- org.apache.logging.log4j
- log4j-slf4j-impl
-
-
- org.apache.logging.log4j
- log4j-1.2-api
-
-
- sqlline
- sqlline
- jar-with-dependencies
-
-
- ${project.basedir}/../lib
-
-
-
-
-
- org.apache.rat
- apache-rat-plugin
-
-
- src/main/java/org/apache/phoenix/coprocessor/generated/*.java
- src/main/resources/META-INF/services/java.sql.Driver
- src/it/resources/json/*.json
-
-
-
-
- org.xolstice.maven.plugins
- protobuf-maven-plugin
-
-
- compile-protoc
- generate-sources
-
- compile
-
-
- ${protobuf.group}:protoc:${protoc.version}:exe:${protoc.arch}
- ${basedir}/src/main/protobuf/
- false
- true
-
-
-
-
-
-
-
@@ -431,7 +247,7 @@
joni
- org.jruby.jcodings
+ org.jruby.jcodings
jcodings
@@ -443,4 +259,185 @@
mvel2
+
+
+
+
+
+ org.apache.maven.plugins
+ maven-site-plugin
+
+
+
+ org.apache.maven.plugins
+ maven-enforcer-plugin
+
+
+
+ import java.util.regex.Pattern;
+ import java.lang.Integer;
+
+ versionPattern = Pattern.compile("(\\d+)\\.(\\d+)\\.(\\d+)[^.]*$");
+ versionMatcher = versionPattern.matcher("${hbase.version}");
+ versionMatcher.find();
+
+ hbaseMajor = Integer.parseInt(versionMatcher.group(1));
+ hbaseMinor = Integer.parseInt(versionMatcher.group(2));
+ hbasePatch = Integer.parseInt(versionMatcher.group(3));
+
+ hbaseMajor == 2 && (
+ ("${hbase.compat.version}".equals("2.4.1")
+ && hbaseMinor == 4
+ && hbasePatch >=1)
+ || ("${hbase.compat.version}".equals("2.5.0")
+ && hbaseMinor == 5
+ && hbasePatch >=0)
+ || ("${hbase.compat.version}".equals("2.5.4")
+ && hbaseMinor == 5
+ && hbasePatch >=4)
+ || ("${hbase.compat.version}".equals("2.6.0")
+ && hbaseMinor == 6
+ && hbasePatch >=0)
+ )
+
+
+
+
+
+ check-hbase-compatibility
+
+ enforce
+
+ validate
+
+
+
+
+ org.codehaus.mojo
+ build-helper-maven-plugin
+
+
+ add-source
+
+ add-source
+
+ generate-sources
+
+
+ ${antlr-output.dir}
+ ${antlr-input.dir}
+
+
+
+
+
+
+
+ org.antlr
+ antlr3-maven-plugin
+
+ ${antlr-output.dir}/org/apache/phoenix/parse
+
+
+
+
+ antlr
+
+
+
+
+
+
+ org.apache.maven.plugins
+ maven-eclipse-plugin
+
+
+ org.jamon.project.templateBuilder
+ org.eclipse.jdt.core.javabuilder
+
+
+
+
+ org.apache.maven.plugins
+ maven-dependency-plugin
+
+
+
+ create-phoenix-generated-classpath
+
+ build-classpath
+
+
+ ${project.build.directory}/cached_classpath.txt
+
+
+
+
+ copy-for-sqlline
+
+ copy
+
+
+
+
+ org.apache.logging.log4j
+ log4j-api
+
+
+ org.apache.logging.log4j
+ log4j-core
+
+
+ org.apache.logging.log4j
+ log4j-slf4j-impl
+
+
+ org.apache.logging.log4j
+ log4j-1.2-api
+
+
+ sqlline
+ sqlline
+ jar-with-dependencies
+
+
+ ${project.basedir}/../lib
+
+
+
+
+
+ org.apache.rat
+ apache-rat-plugin
+
+
+ src/main/java/org/apache/phoenix/coprocessor/generated/*.java
+ src/main/resources/META-INF/services/java.sql.Driver
+ src/it/resources/json/*.json
+
+
+
+
+ org.xolstice.maven.plugins
+ protobuf-maven-plugin
+
+
+ compile-protoc
+
+ compile
+
+ generate-sources
+
+ ${protobuf.group}:protoc:${protoc.version}:exe:${protoc.arch}
+ ${basedir}/src/main/protobuf/
+ false
+ true
+
+
+
+
+
+
diff --git a/phoenix-core-client/src/build/phoenix-core.xml b/phoenix-core-client/src/build/phoenix-core.xml
index 7b8df1ef33b..383488f1472 100644
--- a/phoenix-core-client/src/build/phoenix-core.xml
+++ b/phoenix-core-client/src/build/phoenix-core.xml
@@ -29,10 +29,10 @@
jar
false
-
+
-
true
diff --git a/phoenix-core-client/src/main/java/org/apache/hadoop/hbase/PhoenixTagType.java b/phoenix-core-client/src/main/java/org/apache/hadoop/hbase/PhoenixTagType.java
index da2064e2d01..3b9f17c0bac 100644
--- a/phoenix-core-client/src/main/java/org/apache/hadoop/hbase/PhoenixTagType.java
+++ b/phoenix-core-client/src/main/java/org/apache/hadoop/hbase/PhoenixTagType.java
@@ -7,7 +7,7 @@
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
- * http://www.apache.org/licenses/LICENSE-2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
@@ -18,12 +18,12 @@
package org.apache.hadoop.hbase;
/**
- Used to persist the TagType in HBase Cell Tags.
- All the type present here should be more than @{@link Tag#CUSTOM_TAG_TYPE_RANGE} which is 64.
+ * Used to persist the TagType in HBase Cell Tags. All the type present here should be more
+ * than @{@link Tag#CUSTOM_TAG_TYPE_RANGE} which is 64.
**/
public final class PhoenixTagType {
- /**
- * Indicates the source of operation.
- */
- public static final byte SOURCE_OPERATION_TAG_TYPE = (byte) 65;
+ /**
+ * Indicates the source of operation.
+ */
+ public static final byte SOURCE_OPERATION_TAG_TYPE = (byte) 65;
}
diff --git a/phoenix-core-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoUtil.java b/phoenix-core-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoUtil.java
index fd83af73765..8a00de568c9 100644
--- a/phoenix-core-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoUtil.java
+++ b/phoenix-core-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoUtil.java
@@ -7,7 +7,7 @@
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
- * http://www.apache.org/licenses/LICENSE-2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
@@ -16,9 +16,9 @@
* limitations under the License.
*/
package org.apache.hadoop.hbase.client;
-
+
public class RegionInfoUtil {
- public static byte[] toByteArray(RegionInfo regionInfo) {
- return RegionInfo.toByteArray(regionInfo);
- }
-}
\ No newline at end of file
+ public static byte[] toByteArray(RegionInfo regionInfo) {
+ return RegionInfo.toByteArray(regionInfo);
+ }
+}
diff --git a/phoenix-core-client/src/main/java/org/apache/hadoop/hbase/ipc/controller/ClientRpcControllerFactory.java b/phoenix-core-client/src/main/java/org/apache/hadoop/hbase/ipc/controller/ClientRpcControllerFactory.java
index f90d6401c98..a5cfe3541e0 100644
--- a/phoenix-core-client/src/main/java/org/apache/hadoop/hbase/ipc/controller/ClientRpcControllerFactory.java
+++ b/phoenix-core-client/src/main/java/org/apache/hadoop/hbase/ipc/controller/ClientRpcControllerFactory.java
@@ -1,4 +1,4 @@
-/**
+/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@@ -26,35 +26,35 @@
import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
/**
- * {@link RpcControllerFactory} that sets the priority of metadata rpc calls to be processed
- * in its own queue.
+ * {@link RpcControllerFactory} that sets the priority of metadata rpc calls to be processed in its
+ * own queue.
*/
public class ClientRpcControllerFactory extends RpcControllerFactory {
- public ClientRpcControllerFactory(Configuration conf) {
- super(conf);
- }
-
- @Override
- public HBaseRpcController newController() {
- HBaseRpcController delegate = super.newController();
- return getController(delegate);
- }
-
- @Override
- public HBaseRpcController newController(CellScanner cellScanner) {
- HBaseRpcController delegate = super.newController(cellScanner);
- return getController(delegate);
- }
-
- @Override
- public HBaseRpcController newController(List cellIterables) {
- HBaseRpcController delegate = super.newController(cellIterables);
- return getController(delegate);
- }
-
- private HBaseRpcController getController(HBaseRpcController delegate) {
- return new MetadataRpcController(delegate, conf);
- }
-
-}
\ No newline at end of file
+ public ClientRpcControllerFactory(Configuration conf) {
+ super(conf);
+ }
+
+ @Override
+ public HBaseRpcController newController() {
+ HBaseRpcController delegate = super.newController();
+ return getController(delegate);
+ }
+
+ @Override
+ public HBaseRpcController newController(CellScanner cellScanner) {
+ HBaseRpcController delegate = super.newController(cellScanner);
+ return getController(delegate);
+ }
+
+ @Override
+ public HBaseRpcController newController(List cellIterables) {
+ HBaseRpcController delegate = super.newController(cellIterables);
+ return getController(delegate);
+ }
+
+ private HBaseRpcController getController(HBaseRpcController delegate) {
+ return new MetadataRpcController(delegate, conf);
+ }
+
+}
diff --git a/phoenix-core-client/src/main/java/org/apache/hadoop/hbase/ipc/controller/IndexRpcController.java b/phoenix-core-client/src/main/java/org/apache/hadoop/hbase/ipc/controller/IndexRpcController.java
index 0e876fe6ae1..7d1d34c6d63 100644
--- a/phoenix-core-client/src/main/java/org/apache/hadoop/hbase/ipc/controller/IndexRpcController.java
+++ b/phoenix-core-client/src/main/java/org/apache/hadoop/hbase/ipc/controller/IndexRpcController.java
@@ -1,4 +1,4 @@
-/**
+/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@@ -23,9 +23,9 @@
import org.apache.hadoop.hbase.ipc.HBaseRpcController;
import org.apache.phoenix.query.QueryServices;
import org.apache.phoenix.query.QueryServicesOptions;
+import org.apache.phoenix.util.IndexUtil;
import com.google.protobuf.RpcController;
-import org.apache.phoenix.util.IndexUtil;
/**
* {@link RpcController} that sets the appropriate priority of RPC calls destined for Phoenix index
@@ -33,25 +33,23 @@
*/
class IndexRpcController extends DelegatingHBaseRpcController {
- private final int priority;
- private final String tracingTableName;
-
- public IndexRpcController(HBaseRpcController delegate, Configuration conf) {
- super(delegate);
- this.priority = IndexUtil.getIndexPriority(conf);
- this.tracingTableName = conf.get(QueryServices.TRACING_STATS_TABLE_NAME_ATTRIB,
- QueryServicesOptions.DEFAULT_TRACING_STATS_TABLE_NAME);
- }
-
- @Override
- public void setPriority(final TableName tn) {
- if (!tn.isSystemTable() && !tn.getNameAsString().equals(tracingTableName)) {
- setPriority(this.priority);
- }
- else {
- super.setPriority(tn);
- }
+ private final int priority;
+ private final String tracingTableName;
+
+ public IndexRpcController(HBaseRpcController delegate, Configuration conf) {
+ super(delegate);
+ this.priority = IndexUtil.getIndexPriority(conf);
+ this.tracingTableName = conf.get(QueryServices.TRACING_STATS_TABLE_NAME_ATTRIB,
+ QueryServicesOptions.DEFAULT_TRACING_STATS_TABLE_NAME);
+ }
+
+ @Override
+ public void setPriority(final TableName tn) {
+ if (!tn.isSystemTable() && !tn.getNameAsString().equals(tracingTableName)) {
+ setPriority(this.priority);
+ } else {
+ super.setPriority(tn);
}
-
+ }
}
diff --git a/phoenix-core-client/src/main/java/org/apache/hadoop/hbase/ipc/controller/InterRegionServerIndexRpcControllerFactory.java b/phoenix-core-client/src/main/java/org/apache/hadoop/hbase/ipc/controller/InterRegionServerIndexRpcControllerFactory.java
index c761f9cdc54..26073080f76 100644
--- a/phoenix-core-client/src/main/java/org/apache/hadoop/hbase/ipc/controller/InterRegionServerIndexRpcControllerFactory.java
+++ b/phoenix-core-client/src/main/java/org/apache/hadoop/hbase/ipc/controller/InterRegionServerIndexRpcControllerFactory.java
@@ -7,7 +7,7 @@
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
- * http://www.apache.org/licenses/LICENSE-2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
@@ -26,39 +26,38 @@
import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
/**
- * RpcControllerFactory that should only be used when creating Table for
- * making remote RPCs to the region servers hosting global mutable index table regions.
- * This controller factory shouldn't be globally configured anywhere and is meant to be used
- * only internally by Phoenix indexing code.
+ * RpcControllerFactory that should only be used when creating Table for making remote RPCs to the
+ * region servers hosting global mutable index table regions. This controller factory shouldn't be
+ * globally configured anywhere and is meant to be used only internally by Phoenix indexing code.
*/
public class InterRegionServerIndexRpcControllerFactory extends RpcControllerFactory {
- public InterRegionServerIndexRpcControllerFactory(Configuration conf) {
- super(conf);
- }
-
- @Override
- public HBaseRpcController newController() {
- HBaseRpcController delegate = super.newController();
- return getController(delegate);
- }
-
- @Override
- public HBaseRpcController newController(CellScanner cellScanner) {
- HBaseRpcController delegate = super.newController(cellScanner);
- return getController(delegate);
- }
-
- @Override
- public HBaseRpcController newController(List cellIterables) {
- HBaseRpcController delegate = super.newController(cellIterables);
- return getController(delegate);
- }
-
- private HBaseRpcController getController(HBaseRpcController delegate) {
- // construct a chain of controllers: metadata, index and standard controller
- IndexRpcController indexRpcController = new IndexRpcController(delegate, conf);
- return new MetadataRpcController(indexRpcController, conf);
- }
+ public InterRegionServerIndexRpcControllerFactory(Configuration conf) {
+ super(conf);
+ }
+
+ @Override
+ public HBaseRpcController newController() {
+ HBaseRpcController delegate = super.newController();
+ return getController(delegate);
+ }
+
+ @Override
+ public HBaseRpcController newController(CellScanner cellScanner) {
+ HBaseRpcController delegate = super.newController(cellScanner);
+ return getController(delegate);
+ }
+
+ @Override
+ public HBaseRpcController newController(List cellIterables) {
+ HBaseRpcController delegate = super.newController(cellIterables);
+ return getController(delegate);
+ }
+
+ private HBaseRpcController getController(HBaseRpcController delegate) {
+ // construct a chain of controllers: metadata, index and standard controller
+ IndexRpcController indexRpcController = new IndexRpcController(delegate, conf);
+ return new MetadataRpcController(indexRpcController, conf);
+ }
}
diff --git a/phoenix-core-client/src/main/java/org/apache/hadoop/hbase/ipc/controller/InvalidateMetadataCacheController.java b/phoenix-core-client/src/main/java/org/apache/hadoop/hbase/ipc/controller/InvalidateMetadataCacheController.java
index cdfa9da168f..c13439547b9 100644
--- a/phoenix-core-client/src/main/java/org/apache/hadoop/hbase/ipc/controller/InvalidateMetadataCacheController.java
+++ b/phoenix-core-client/src/main/java/org/apache/hadoop/hbase/ipc/controller/InvalidateMetadataCacheController.java
@@ -1,4 +1,4 @@
-/**
+/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@@ -7,7 +7,7 @@
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
- * http://www.apache.org/licenses/LICENSE-2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
@@ -27,25 +27,25 @@
* Controller used to invalidate server side metadata cache RPCs.
*/
public class InvalidateMetadataCacheController extends DelegatingHBaseRpcController {
- private int priority;
+ private int priority;
- public InvalidateMetadataCacheController(HBaseRpcController delegate, Configuration conf) {
- super(delegate);
- this.priority = IndexUtil.getInvalidateMetadataCachePriority(conf);
- }
+ public InvalidateMetadataCacheController(HBaseRpcController delegate, Configuration conf) {
+ super(delegate);
+ this.priority = IndexUtil.getInvalidateMetadataCachePriority(conf);
+ }
- @Override
- public void setPriority(int priority) {
- this.priority = priority;
- }
+ @Override
+ public void setPriority(int priority) {
+ this.priority = priority;
+ }
- @Override
- public void setPriority(TableName tn) {
- // Nothing
- }
+ @Override
+ public void setPriority(TableName tn) {
+ // Nothing
+ }
- @Override
- public int getPriority() {
- return this.priority;
- }
+ @Override
+ public int getPriority() {
+ return this.priority;
+ }
}
diff --git a/phoenix-core-client/src/main/java/org/apache/hadoop/hbase/ipc/controller/InvalidateMetadataCacheControllerFactory.java b/phoenix-core-client/src/main/java/org/apache/hadoop/hbase/ipc/controller/InvalidateMetadataCacheControllerFactory.java
index ee6b3b24ffa..3a0c13366ec 100644
--- a/phoenix-core-client/src/main/java/org/apache/hadoop/hbase/ipc/controller/InvalidateMetadataCacheControllerFactory.java
+++ b/phoenix-core-client/src/main/java/org/apache/hadoop/hbase/ipc/controller/InvalidateMetadataCacheControllerFactory.java
@@ -1,4 +1,4 @@
-/**
+/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@@ -7,7 +7,7 @@
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
- * http://www.apache.org/licenses/LICENSE-2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
@@ -25,17 +25,17 @@
* Factory to instantiate InvalidateMetadataCacheControllers
*/
public class InvalidateMetadataCacheControllerFactory extends RpcControllerFactory {
- public InvalidateMetadataCacheControllerFactory(Configuration conf) {
- super(conf);
- }
+ public InvalidateMetadataCacheControllerFactory(Configuration conf) {
+ super(conf);
+ }
- @Override
- public HBaseRpcController newController() {
- HBaseRpcController delegate = super.newController();
- return getController(delegate);
- }
+ @Override
+ public HBaseRpcController newController() {
+ HBaseRpcController delegate = super.newController();
+ return getController(delegate);
+ }
- private HBaseRpcController getController(HBaseRpcController delegate) {
- return new InvalidateMetadataCacheController(delegate, conf);
- }
+ private HBaseRpcController getController(HBaseRpcController delegate) {
+ return new InvalidateMetadataCacheController(delegate, conf);
+ }
}
diff --git a/phoenix-core-client/src/main/java/org/apache/hadoop/hbase/ipc/controller/MetadataRpcController.java b/phoenix-core-client/src/main/java/org/apache/hadoop/hbase/ipc/controller/MetadataRpcController.java
index 16ad4394427..f4159bf4076 100644
--- a/phoenix-core-client/src/main/java/org/apache/hadoop/hbase/ipc/controller/MetadataRpcController.java
+++ b/phoenix-core-client/src/main/java/org/apache/hadoop/hbase/ipc/controller/MetadataRpcController.java
@@ -1,4 +1,4 @@
-/**
+/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@@ -24,10 +24,10 @@
import org.apache.hadoop.hbase.ipc.DelegatingHBaseRpcController;
import org.apache.hadoop.hbase.ipc.HBaseRpcController;
import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
+import org.apache.phoenix.thirdparty.com.google.common.collect.ImmutableList;
import org.apache.phoenix.util.IndexUtil;
import org.apache.phoenix.util.SchemaUtil;
-import org.apache.phoenix.thirdparty.com.google.common.collect.ImmutableList;
import com.google.protobuf.RpcController;
/**
@@ -36,39 +36,37 @@
*/
class MetadataRpcController extends DelegatingHBaseRpcController {
- private int priority;
- // list of system tables
- private static final List SYSTEM_TABLE_NAMES = new ImmutableList.Builder()
- .add(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME)
- .add(PhoenixDatabaseMetaData.SYSTEM_STATS_NAME)
- .add(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME)
- .add(PhoenixDatabaseMetaData.SYSTEM_FUNCTION_NAME)
- .add(PhoenixDatabaseMetaData.SYSTEM_CHILD_LINK_NAME)
- .add(SchemaUtil.getPhysicalTableName(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES, true)
- .getNameAsString())
- .add(SchemaUtil.getPhysicalTableName(PhoenixDatabaseMetaData.SYSTEM_STATS_NAME_BYTES, true)
- .getNameAsString())
- .add(SchemaUtil.getPhysicalTableName(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME_BYTES, true)
- .getNameAsString())
- .add(SchemaUtil.getPhysicalTableName(PhoenixDatabaseMetaData.SYSTEM_FUNCTION_NAME_BYTES, true)
- .getNameAsString())
- .add(SchemaUtil.getPhysicalTableName(PhoenixDatabaseMetaData.SYSTEM_CHILD_LINK_NAME_BYTES, true)
- .getNameAsString())
- .build();
+ private int priority;
+ // list of system tables
+ private static final List SYSTEM_TABLE_NAMES = new ImmutableList.Builder()
+ .add(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME).add(PhoenixDatabaseMetaData.SYSTEM_STATS_NAME)
+ .add(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME)
+ .add(PhoenixDatabaseMetaData.SYSTEM_FUNCTION_NAME)
+ .add(PhoenixDatabaseMetaData.SYSTEM_CHILD_LINK_NAME)
+ .add(SchemaUtil.getPhysicalTableName(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES, true)
+ .getNameAsString())
+ .add(SchemaUtil.getPhysicalTableName(PhoenixDatabaseMetaData.SYSTEM_STATS_NAME_BYTES, true)
+ .getNameAsString())
+ .add(SchemaUtil.getPhysicalTableName(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME_BYTES, true)
+ .getNameAsString())
+ .add(SchemaUtil.getPhysicalTableName(PhoenixDatabaseMetaData.SYSTEM_FUNCTION_NAME_BYTES, true)
+ .getNameAsString())
+ .add(SchemaUtil.getPhysicalTableName(PhoenixDatabaseMetaData.SYSTEM_CHILD_LINK_NAME_BYTES, true)
+ .getNameAsString())
+ .build();
- public MetadataRpcController(HBaseRpcController delegate,
- Configuration conf) {
- super(delegate);
- this.priority = IndexUtil.getMetadataPriority(conf);
- }
+ public MetadataRpcController(HBaseRpcController delegate, Configuration conf) {
+ super(delegate);
+ this.priority = IndexUtil.getMetadataPriority(conf);
+ }
- @Override
- public void setPriority(final TableName tn) {
- if (SYSTEM_TABLE_NAMES.contains(tn.getNameAsString())) {
- setPriority(this.priority);
- } else {
- super.setPriority(tn);
- }
- }
+ @Override
+ public void setPriority(final TableName tn) {
+ if (SYSTEM_TABLE_NAMES.contains(tn.getNameAsString())) {
+ setPriority(this.priority);
+ } else {
+ super.setPriority(tn);
+ }
+ }
}
diff --git a/phoenix-core-client/src/main/java/org/apache/hadoop/hbase/ipc/controller/ServerSideRPCControllerFactory.java b/phoenix-core-client/src/main/java/org/apache/hadoop/hbase/ipc/controller/ServerSideRPCControllerFactory.java
index a1a97cf6ce7..3ff84ef353d 100644
--- a/phoenix-core-client/src/main/java/org/apache/hadoop/hbase/ipc/controller/ServerSideRPCControllerFactory.java
+++ b/phoenix-core-client/src/main/java/org/apache/hadoop/hbase/ipc/controller/ServerSideRPCControllerFactory.java
@@ -7,7 +7,7 @@
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
- * http://www.apache.org/licenses/LICENSE-2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
@@ -21,18 +21,18 @@
import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
/**
- * {@link RpcControllerFactory} that should only be used when
- * making server-server remote RPCs to the region servers hosting Phoenix SYSTEM tables.
+ * {@link RpcControllerFactory} that should only be used when making server-server remote RPCs to
+ * the region servers hosting Phoenix SYSTEM tables.
*/
-public class ServerSideRPCControllerFactory {
+public class ServerSideRPCControllerFactory {
- protected final Configuration conf;
+ protected final Configuration conf;
- public ServerSideRPCControllerFactory(Configuration conf) {
- this.conf = conf;
- }
+ public ServerSideRPCControllerFactory(Configuration conf) {
+ this.conf = conf;
+ }
- public ServerToServerRpcController newController() {
- return new ServerToServerRpcControllerImpl(this.conf);
- }
+ public ServerToServerRpcController newController() {
+ return new ServerToServerRpcControllerImpl(this.conf);
+ }
}
diff --git a/phoenix-core-client/src/main/java/org/apache/hadoop/hbase/ipc/controller/ServerToServerRpcController.java b/phoenix-core-client/src/main/java/org/apache/hadoop/hbase/ipc/controller/ServerToServerRpcController.java
index 4916168b9db..0bb5b4e56e4 100644
--- a/phoenix-core-client/src/main/java/org/apache/hadoop/hbase/ipc/controller/ServerToServerRpcController.java
+++ b/phoenix-core-client/src/main/java/org/apache/hadoop/hbase/ipc/controller/ServerToServerRpcController.java
@@ -7,7 +7,7 @@
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
- * http://www.apache.org/licenses/LICENSE-2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
@@ -15,28 +15,26 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-
package org.apache.hadoop.hbase.ipc.controller;
-import com.google.protobuf.RpcController;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
+import com.google.protobuf.RpcController;
+
public interface ServerToServerRpcController extends RpcController {
- /**
- * @param priority Priority for this request; should fall roughly in the range
- * {@link HConstants#NORMAL_QOS} to {@link HConstants#HIGH_QOS}
- */
- void setPriority(int priority);
+ /**
+ * @param priority Priority for this request; should fall roughly in the range
+ * {@link HConstants#NORMAL_QOS} to {@link HConstants#HIGH_QOS}
+ */
+ void setPriority(int priority);
- /**
- * @param tn Set priority based off the table we are going against.
- */
- void setPriority(final TableName tn);
+ /**
+ * @param tn Set priority based off the table we are going against.
+ */
+ void setPriority(final TableName tn);
- /**
- * @return The priority of this request
- */
- int getPriority();
+ /** Returns The priority of this request */
+ int getPriority();
}
diff --git a/phoenix-core-client/src/main/java/org/apache/hadoop/hbase/ipc/controller/ServerToServerRpcControllerImpl.java b/phoenix-core-client/src/main/java/org/apache/hadoop/hbase/ipc/controller/ServerToServerRpcControllerImpl.java
index 8e12d2ec2a7..28bcd50fa0f 100644
--- a/phoenix-core-client/src/main/java/org/apache/hadoop/hbase/ipc/controller/ServerToServerRpcControllerImpl.java
+++ b/phoenix-core-client/src/main/java/org/apache/hadoop/hbase/ipc/controller/ServerToServerRpcControllerImpl.java
@@ -7,7 +7,7 @@
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
- * http://www.apache.org/licenses/LICENSE-2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
@@ -15,10 +15,10 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-
package org.apache.hadoop.hbase.ipc.controller;
-import com.google.protobuf.RpcController;
+import java.util.List;
+
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.ipc.ServerRpcController;
@@ -27,49 +27,48 @@
import org.apache.phoenix.util.IndexUtil;
import org.apache.phoenix.util.SchemaUtil;
-import java.util.List;
+import com.google.protobuf.RpcController;
/**
- * {@link RpcController} that sets the appropriate priority of server-server RPC calls destined
- * for Phoenix SYSTEM tables.
+ * {@link RpcController} that sets the appropriate priority of server-server RPC calls destined for
+ * Phoenix SYSTEM tables.
*/
-public class ServerToServerRpcControllerImpl extends ServerRpcController implements
- ServerToServerRpcController {
+public class ServerToServerRpcControllerImpl extends ServerRpcController
+ implements ServerToServerRpcController {
- private int priority;
- // list of system tables that can possibly have server-server rpc's
- private static final List SYSTEM_TABLE_NAMES = new ImmutableList.Builder()
- .add(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME)
- .add(PhoenixDatabaseMetaData.SYSTEM_CHILD_LINK_NAME)
- .add(PhoenixDatabaseMetaData.SYSTEM_TASK_NAME)
- .add(SchemaUtil.getPhysicalTableName(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES, true)
- .getNameAsString())
- .add(SchemaUtil.getPhysicalTableName(PhoenixDatabaseMetaData.SYSTEM_CHILD_LINK_NAME_BYTES, true)
- .getNameAsString())
- .add(SchemaUtil.getPhysicalTableName(PhoenixDatabaseMetaData.SYSTEM_TASK_NAME_BYTES, true)
- .getNameAsString())
- .build();
+ private int priority;
+ // list of system tables that can possibly have server-server rpc's
+ private static final List SYSTEM_TABLE_NAMES = new ImmutableList.Builder()
+ .add(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME)
+ .add(PhoenixDatabaseMetaData.SYSTEM_CHILD_LINK_NAME)
+ .add(PhoenixDatabaseMetaData.SYSTEM_TASK_NAME)
+ .add(SchemaUtil.getPhysicalTableName(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES, true)
+ .getNameAsString())
+ .add(SchemaUtil.getPhysicalTableName(PhoenixDatabaseMetaData.SYSTEM_CHILD_LINK_NAME_BYTES, true)
+ .getNameAsString())
+ .add(SchemaUtil.getPhysicalTableName(PhoenixDatabaseMetaData.SYSTEM_TASK_NAME_BYTES, true)
+ .getNameAsString())
+ .build();
- public ServerToServerRpcControllerImpl(
- Configuration conf) {
- super();
- this.priority = IndexUtil.getServerSidePriority(conf);
- }
+ public ServerToServerRpcControllerImpl(Configuration conf) {
+ super();
+ this.priority = IndexUtil.getServerSidePriority(conf);
+ }
- @Override
- public void setPriority(final TableName tn) {
- if (SYSTEM_TABLE_NAMES.contains(tn.getNameAsString())) {
- setPriority(this.priority);
- }
+ @Override
+ public void setPriority(final TableName tn) {
+ if (SYSTEM_TABLE_NAMES.contains(tn.getNameAsString())) {
+ setPriority(this.priority);
}
+ }
+ @Override
+ public void setPriority(int priority) {
+ this.priority = priority;
+ }
- @Override public void setPriority(int priority) {
- this.priority = priority;
- }
-
-
- @Override public int getPriority() {
- return this.priority;
- }
+ @Override
+ public int getPriority() {
+ return this.priority;
+ }
}
diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/cache/HashCache.java b/phoenix-core-client/src/main/java/org/apache/phoenix/cache/HashCache.java
index 80e37ce64db..de7b950c282 100644
--- a/phoenix-core-client/src/main/java/org/apache/phoenix/cache/HashCache.java
+++ b/phoenix-core-client/src/main/java/org/apache/phoenix/cache/HashCache.java
@@ -7,7 +7,7 @@
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
- * http://www.apache.org/licenses/LICENSE-2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
@@ -21,20 +21,19 @@
import java.io.IOException;
import java.util.List;
-import net.jcip.annotations.Immutable;
-
import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
import org.apache.phoenix.schema.tuple.Tuple;
+import net.jcip.annotations.Immutable;
/**
- * Encapsulate deserialized hash cache from bytes into Map.
- * The Map uses the row key as the key and the row as the value.
- *
+ * Encapsulate deserialized hash cache from bytes into Map. The Map uses the row key as the key and
+ * the row as the value.
* @since 0.1
*/
@Immutable
public interface HashCache extends Closeable {
- public int getClientVersion();
- public List get(ImmutableBytesPtr hashKey) throws IOException;
+ public int getClientVersion();
+
+ public List get(ImmutableBytesPtr hashKey) throws IOException;
}
diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/cache/IndexMetaDataCache.java b/phoenix-core-client/src/main/java/org/apache/phoenix/cache/IndexMetaDataCache.java
index 9f3dd592336..b9cb21e156c 100644
--- a/phoenix-core-client/src/main/java/org/apache/phoenix/cache/IndexMetaDataCache.java
+++ b/phoenix-core-client/src/main/java/org/apache/phoenix/cache/IndexMetaDataCache.java
@@ -7,7 +7,7 @@
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
- * http://www.apache.org/licenses/LICENSE-2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
@@ -15,7 +15,6 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-
package org.apache.phoenix.cache;
import java.io.Closeable;
@@ -28,29 +27,32 @@
import org.apache.phoenix.util.ScanUtil;
public interface IndexMetaDataCache extends Closeable {
- public static final IndexMetaDataCache EMPTY_INDEX_META_DATA_CACHE = new IndexMetaDataCache() {
-
- @Override
- public void close() throws IOException {
- }
-
- @Override
- public List getIndexMaintainers() {
- return Collections.emptyList();
- }
-
- @Override
- public PhoenixTransactionContext getTransactionContext() {
- return null;
- }
-
- @Override
- public int getClientVersion() {
- return ScanUtil.UNKNOWN_CLIENT_VERSION;
- }
-
- };
-    public List<IndexMaintainer> getIndexMaintainers();
- public PhoenixTransactionContext getTransactionContext();
- public int getClientVersion();
+ public static final IndexMetaDataCache EMPTY_INDEX_META_DATA_CACHE = new IndexMetaDataCache() {
+
+ @Override
+ public void close() throws IOException {
+ }
+
+ @Override
+    public List<IndexMaintainer> getIndexMaintainers() {
+ return Collections.emptyList();
+ }
+
+ @Override
+ public PhoenixTransactionContext getTransactionContext() {
+ return null;
+ }
+
+ @Override
+ public int getClientVersion() {
+ return ScanUtil.UNKNOWN_CLIENT_VERSION;
+ }
+
+ };
+
+  public List<IndexMaintainer> getIndexMaintainers();
+
+ public PhoenixTransactionContext getTransactionContext();
+
+ public int getClientVersion();
}
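The EMPTY_INDEX_META_DATA_CACHE sentinel above can be exercised directly. A small sketch (the class name is illustrative) showing what each accessor returns for the empty cache:

```java
import org.apache.phoenix.cache.IndexMetaDataCache;
import org.apache.phoenix.util.ScanUtil;

public class EmptyIndexMetaDataCacheExample {
  public static void main(String[] args) throws Exception {
    IndexMetaDataCache cache = IndexMetaDataCache.EMPTY_INDEX_META_DATA_CACHE;
    // The sentinel carries no index maintainers, no transaction context,
    // and reports an unknown client version.
    System.out.println(cache.getIndexMaintainers().isEmpty());                         // true
    System.out.println(cache.getTransactionContext());                                 // null
    System.out.println(cache.getClientVersion() == ScanUtil.UNKNOWN_CLIENT_VERSION);   // true
    cache.close(); // no-op for the sentinel
  }
}
```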
diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/cache/JodaTimezoneCache.java b/phoenix-core-client/src/main/java/org/apache/phoenix/cache/JodaTimezoneCache.java
index ea6982d00a6..2a7e2a32e74 100644
--- a/phoenix-core-client/src/main/java/org/apache/phoenix/cache/JodaTimezoneCache.java
+++ b/phoenix-core-client/src/main/java/org/apache/phoenix/cache/JodaTimezoneCache.java
@@ -4,12 +4,12 @@
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
- * "License"); you maynot use this file except in compliance
+ * "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
- * http://www.apache.org/licenses/LICENSE-2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
*
- * Unless required by applicablelaw or agreed to in writing, software
+ * Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
@@ -17,70 +17,73 @@
*/
package org.apache.phoenix.cache;
-import org.apache.phoenix.thirdparty.com.google.common.cache.CacheBuilder;
-import org.apache.phoenix.thirdparty.com.google.common.cache.CacheLoader;
-import org.apache.phoenix.thirdparty.com.google.common.cache.LoadingCache;
-import org.apache.phoenix.thirdparty.com.google.common.util.concurrent.UncheckedExecutionException;
import java.nio.ByteBuffer;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
+
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.schema.IllegalDataException;
+import org.apache.phoenix.thirdparty.com.google.common.cache.CacheBuilder;
+import org.apache.phoenix.thirdparty.com.google.common.cache.CacheLoader;
+import org.apache.phoenix.thirdparty.com.google.common.cache.LoadingCache;
+import org.apache.phoenix.thirdparty.com.google.common.util.concurrent.UncheckedExecutionException;
import org.joda.time.DateTimeZone;
public class JodaTimezoneCache {
- public static final int CACHE_EXPRIRE_TIME_MINUTES = 10;
-    private static final LoadingCache<ByteBuffer, DateTimeZone> cachedJodaTimeZones = createTimezoneCache();
+ public static final int CACHE_EXPRIRE_TIME_MINUTES = 10;
+  private static final LoadingCache<ByteBuffer, DateTimeZone> cachedJodaTimeZones =
+ createTimezoneCache();
- /**
- * Returns joda's DateTimeZone instance from cache or create new instance and cache it.
- *
- * @param timezoneId Timezone Id as accepted by {@code DateTimeZone.forID()}. E.g. Europe/Isle_of_Man
- * @return joda's DateTimeZone instance
- * @throws IllegalDataException if unknown timezone id is passed
- */
- public static DateTimeZone getInstance(ByteBuffer timezoneId) {
- try {
- return cachedJodaTimeZones.get(timezoneId);
- } catch (ExecutionException ex) {
- throw new IllegalDataException(ex);
- } catch (UncheckedExecutionException e) {
- throw new IllegalDataException("Unknown timezone " + Bytes.toString(timezoneId.array()));
- }
+ /**
+ * Returns joda's DateTimeZone instance from cache or create new instance and cache it.
+ * @param timezoneId Timezone Id as accepted by {@code DateTimeZone.forID()}. E.g.
+ * Europe/Isle_of_Man
+ * @return joda's DateTimeZone instance
+ * @throws IllegalDataException if unknown timezone id is passed
+ */
+ public static DateTimeZone getInstance(ByteBuffer timezoneId) {
+ try {
+ return cachedJodaTimeZones.get(timezoneId);
+ } catch (ExecutionException ex) {
+ throw new IllegalDataException(ex);
+ } catch (UncheckedExecutionException e) {
+ throw new IllegalDataException("Unknown timezone " + Bytes.toString(timezoneId.array()));
}
+ }
- /**
- * Returns joda's DateTimeZone instance from cache or create new instance and cache it.
- *
- * @param timezoneId Timezone Id as accepted by {@code DateTimeZone.forID()}. E.g. Europe/Isle_of_Man
- * @return joda's DateTimeZone instance
- * @throws IllegalDataException if unknown timezone id is passed
- */
- public static DateTimeZone getInstance(ImmutableBytesWritable timezoneId) {
- return getInstance(ByteBuffer.wrap(timezoneId.copyBytes()));
- }
+ /**
+ * Returns joda's DateTimeZone instance from cache or create new instance and cache it.
+ * @param timezoneId Timezone Id as accepted by {@code DateTimeZone.forID()}. E.g.
+ * Europe/Isle_of_Man
+ * @return joda's DateTimeZone instance
+ * @throws IllegalDataException if unknown timezone id is passed
+ */
+ public static DateTimeZone getInstance(ImmutableBytesWritable timezoneId) {
+ return getInstance(ByteBuffer.wrap(timezoneId.copyBytes()));
+ }
- /**
- * Returns joda's DateTimeZone instance from cache or create new instance and cache it.
- *
- * @param timezoneId Timezone Id as accepted by {@code DateTimeZone.forID()}. E.g. Europe/Isle_of_Man
- * @return joda's DateTimeZone instance
- * @throws IllegalDataException if unknown timezone id is passed
- */
- public static DateTimeZone getInstance(String timezoneId) {
- return getInstance(ByteBuffer.wrap(Bytes.toBytes(timezoneId)));
- }
+ /**
+ * Returns joda's DateTimeZone instance from cache or create new instance and cache it.
+ * @param timezoneId Timezone Id as accepted by {@code DateTimeZone.forID()}. E.g.
+ * Europe/Isle_of_Man
+ * @return joda's DateTimeZone instance
+ * @throws IllegalDataException if unknown timezone id is passed
+ */
+ public static DateTimeZone getInstance(String timezoneId) {
+ return getInstance(ByteBuffer.wrap(Bytes.toBytes(timezoneId)));
+ }
-    private static LoadingCache<ByteBuffer, DateTimeZone> createTimezoneCache() {
-        return CacheBuilder.newBuilder().expireAfterAccess(CACHE_EXPRIRE_TIME_MINUTES, TimeUnit.MINUTES).build(new CacheLoader<ByteBuffer, DateTimeZone>() {
+  private static LoadingCache<ByteBuffer, DateTimeZone> createTimezoneCache() {
+ return CacheBuilder.newBuilder().expireAfterAccess(CACHE_EXPRIRE_TIME_MINUTES, TimeUnit.MINUTES)
+      .build(new CacheLoader<ByteBuffer, DateTimeZone>() {
- @Override
- public DateTimeZone load(ByteBuffer timezone) throws Exception {
- return DateTimeZone.forID(Bytes.toString(timezone.array()));
- }
- });
- }
+ @Override
+ public DateTimeZone load(ByteBuffer timezone) throws Exception {
+ return DateTimeZone.forID(Bytes.toString(timezone.array()));
+ }
+ });
+ }
}
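A short usage sketch for the timezone cache above, assuming the phoenix-core-client and joda-time jars are on the classpath; the timezone ids are just examples:

```java
import org.apache.phoenix.cache.JodaTimezoneCache;
import org.apache.phoenix.schema.IllegalDataException;
import org.joda.time.DateTimeZone;

public class JodaTimezoneCacheExample {
  public static void main(String[] args) {
    // Repeated lookups within CACHE_EXPRIRE_TIME_MINUTES are served from the LoadingCache
    // instead of calling DateTimeZone.forID() again.
    DateTimeZone tz = JodaTimezoneCache.getInstance("Europe/Isle_of_Man");
    System.out.println(tz.getID());

    try {
      JodaTimezoneCache.getInstance("Not/A_Timezone");
    } catch (IllegalDataException e) {
      // Unknown ids surface as IllegalDataException rather than a raw cache exception.
      System.out.println("rejected: " + e.getMessage());
    }
  }
}
```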
diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/cache/ServerCacheClient.java b/phoenix-core-client/src/main/java/org/apache/phoenix/cache/ServerCacheClient.java
index 9c6bb116952..64ed016bff1 100644
--- a/phoenix-core-client/src/main/java/org/apache/phoenix/cache/ServerCacheClient.java
+++ b/phoenix-core-client/src/main/java/org/apache/phoenix/cache/ServerCacheClient.java
@@ -7,7 +7,7 @@
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
- * http://www.apache.org/licenses/LICENSE-2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
@@ -51,14 +51,14 @@
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.compile.QueryPlan;
import org.apache.phoenix.compile.ScanRanges;
-import org.apache.phoenix.coprocessorclient.MetaDataProtocol;
-import org.apache.phoenix.coprocessorclient.ServerCachingProtocol.ServerCacheFactory;
import org.apache.phoenix.coprocessor.generated.ServerCacheFactoryProtos;
import org.apache.phoenix.coprocessor.generated.ServerCachingProtos.AddServerCacheRequest;
import org.apache.phoenix.coprocessor.generated.ServerCachingProtos.AddServerCacheResponse;
import org.apache.phoenix.coprocessor.generated.ServerCachingProtos.RemoveServerCacheRequest;
import org.apache.phoenix.coprocessor.generated.ServerCachingProtos.RemoveServerCacheResponse;
import org.apache.phoenix.coprocessor.generated.ServerCachingProtos.ServerCachingService;
+import org.apache.phoenix.coprocessorclient.MetaDataProtocol;
+import org.apache.phoenix.coprocessorclient.ServerCachingProtocol.ServerCacheFactory;
import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.job.JobManager.JobCallable;
import org.apache.phoenix.join.HashCacheFactory;
@@ -79,485 +79,497 @@
import org.slf4j.LoggerFactory;
/**
- *
* Client for sending cache to each region server
- *
- *
* @since 0.1
*/
public class ServerCacheClient {
- public static final int UUID_LENGTH = Bytes.SIZEOF_LONG;
- public static final byte[] KEY_IN_FIRST_REGION = new byte[]{0};
- private static final Logger LOGGER = LoggerFactory.getLogger(ServerCacheClient.class);
- private static final Random RANDOM = new Random();
- public static final String HASH_JOIN_SERVER_CACHE_RESEND_PER_SERVER = "hash.join.server.cache.resend.per.server";
- private final PhoenixConnection connection;
- private final Map cacheUsingTableMap = new ConcurrentHashMap();
+ public static final int UUID_LENGTH = Bytes.SIZEOF_LONG;
+ public static final byte[] KEY_IN_FIRST_REGION = new byte[] { 0 };
+ private static final Logger LOGGER = LoggerFactory.getLogger(ServerCacheClient.class);
+ private static final Random RANDOM = new Random();
+ public static final String HASH_JOIN_SERVER_CACHE_RESEND_PER_SERVER =
+ "hash.join.server.cache.resend.per.server";
+ private final PhoenixConnection connection;
+  private final Map<Integer, PTable> cacheUsingTableMap = new ConcurrentHashMap<>();
- /**
- * Construct client used to create a serialized cached snapshot of a table and send it to each region server
- * for caching during hash join processing.
- * @param connection the client connection
- *
- * TODO: instead of minMaxKeyRange, have an interface for iterating through ranges as we may be sending to
- * servers when we don't have to if the min is in first region and max is in last region, especially for point queries.
- */
- public ServerCacheClient(PhoenixConnection connection) {
- this.connection = connection;
- }
-
- public PhoenixConnection getConnection() {
- return connection;
- }
-
- /**
- * Client-side representation of a server cache. Call {@link #close()} when usage
- * is complete to free cache up on region server
- *
- *
- * @since 0.1
- */
- public class ServerCache implements SQLCloseable {
- private final int size;
- private final byte[] id;
- private final Map servers;
- private ImmutableBytesWritable cachePtr;
- private MemoryChunk chunk;
- private File outputFile;
- private long maxServerCacheTTL;
-
-
- public ServerCache(byte[] id, Set servers, ImmutableBytesWritable cachePtr,
- ConnectionQueryServices services, boolean storeCacheOnClient) throws IOException {
- maxServerCacheTTL = services.getProps().getInt(
- QueryServices.MAX_SERVER_CACHE_TIME_TO_LIVE_MS_ATTRIB,
- QueryServicesOptions.DEFAULT_MAX_SERVER_CACHE_TIME_TO_LIVE_MS);
- this.id = id;
- this.servers = new HashMap();
- long currentTime = EnvironmentEdgeManager.currentTimeMillis();
- for(HRegionLocation loc : servers) {
- this.servers.put(loc, currentTime);
- }
- this.size = cachePtr.getLength();
- if (storeCacheOnClient) {
- try {
- this.chunk = services.getMemoryManager().allocate(cachePtr.getLength());
- this.cachePtr = cachePtr;
- } catch (InsufficientMemoryException e) {
- this.outputFile = File.createTempFile("HashJoinCacheSpooler", ".bin", new File(services.getProps()
- .get(QueryServices.SPOOL_DIRECTORY, QueryServicesOptions.DEFAULT_SPOOL_DIRECTORY)));
- try (OutputStream fio = Files.newOutputStream(outputFile.toPath())) {
- fio.write(cachePtr.get(), cachePtr.getOffset(), cachePtr.getLength());
- }
- }
- }
-
- }
+ /**
+ * Construct client used to create a serialized cached snapshot of a table and send it to each
+ * region server for caching during hash join processing.
+ * @param connection the client connection TODO: instead of minMaxKeyRange, have an interface for
+ * iterating through ranges as we may be sending to servers when we don't have
+ * to if the min is in first region and max is in last region, especially for
+ * point queries.
+ */
+ public ServerCacheClient(PhoenixConnection connection) {
+ this.connection = connection;
+ }
- public ImmutableBytesWritable getCachePtr() throws IOException {
- if(this.outputFile!=null){
- try (InputStream fio = Files.newInputStream(outputFile.toPath())) {
- byte[] b = new byte[this.size];
- fio.read(b);
- cachePtr = new ImmutableBytesWritable(b);
- }
- }
- return cachePtr;
- }
+ public PhoenixConnection getConnection() {
+ return connection;
+ }
- /**
- * Gets the size in bytes of hash cache
- */
- public int getSize() {
- return size;
- }
+ /**
+ * Client-side representation of a server cache. Call {@link #close()} when usage is complete to
+ * free cache up on region server
+ * @since 0.1
+ */
+ public class ServerCache implements SQLCloseable {
+ private final int size;
+ private final byte[] id;
+    private final Map<HRegionLocation, Long> servers;
+ private ImmutableBytesWritable cachePtr;
+ private MemoryChunk chunk;
+ private File outputFile;
+ private long maxServerCacheTTL;
- /**
- * Gets the unique identifier for this hash cache
- */
- public byte[] getId() {
- return id;
+    public ServerCache(byte[] id, Set<HRegionLocation> servers, ImmutableBytesWritable cachePtr,
+ ConnectionQueryServices services, boolean storeCacheOnClient) throws IOException {
+ maxServerCacheTTL =
+ services.getProps().getInt(QueryServices.MAX_SERVER_CACHE_TIME_TO_LIVE_MS_ATTRIB,
+ QueryServicesOptions.DEFAULT_MAX_SERVER_CACHE_TIME_TO_LIVE_MS);
+ this.id = id;
+      this.servers = new HashMap<HRegionLocation, Long>();
+ long currentTime = EnvironmentEdgeManager.currentTimeMillis();
+ for (HRegionLocation loc : servers) {
+ this.servers.put(loc, currentTime);
+ }
+ this.size = cachePtr.getLength();
+ if (storeCacheOnClient) {
+ try {
+ this.chunk = services.getMemoryManager().allocate(cachePtr.getLength());
+ this.cachePtr = cachePtr;
+ } catch (InsufficientMemoryException e) {
+ this.outputFile =
+ File.createTempFile("HashJoinCacheSpooler", ".bin", new File(services.getProps()
+ .get(QueryServices.SPOOL_DIRECTORY, QueryServicesOptions.DEFAULT_SPOOL_DIRECTORY)));
+ try (OutputStream fio = Files.newOutputStream(outputFile.toPath())) {
+ fio.write(cachePtr.get(), cachePtr.getOffset(), cachePtr.getLength());
+ }
}
+ }
- public boolean addServer(HRegionLocation loc) {
- if(this.servers.containsKey(loc)) {
- return false;
- } else {
- this.servers.put(loc, EnvironmentEdgeManager.currentTimeMillis());
- return true;
- }
- }
+ }
- public boolean isExpired(HRegionLocation loc) {
- if(this.servers.containsKey(loc)) {
- Long time = this.servers.get(loc);
- if(EnvironmentEdgeManager.currentTimeMillis() - time > maxServerCacheTTL)
- return true; // cache was send more than maxTTL ms ago, expecting that it's expired
- } else {
- return false; // should be on server yet.
- }
- return false; // Unknown region location. Need to send the cache.
+ public ImmutableBytesWritable getCachePtr() throws IOException {
+ if (this.outputFile != null) {
+ try (InputStream fio = Files.newInputStream(outputFile.toPath())) {
+ byte[] b = new byte[this.size];
+ fio.read(b);
+ cachePtr = new ImmutableBytesWritable(b);
}
+ }
+ return cachePtr;
+ }
+ /**
+ * Gets the size in bytes of hash cache
+ */
+ public int getSize() {
+ return size;
+ }
-
- /**
- * Call to free up cache on region servers when no longer needed
- */
- @Override
- public void close() throws SQLException {
- try{
- removeServerCache(this, servers.keySet());
- }finally{
- cachePtr = null;
- if (chunk != null) {
- chunk.close();
- }
- if (outputFile != null) {
- outputFile.delete();
- }
- }
- }
+ /**
+ * Gets the unique identifier for this hash cache
+ */
+ public byte[] getId() {
+ return id;
}
- public ServerCache createServerCache(byte[] cacheId, QueryPlan delegate)
- throws SQLException, IOException {
- PTable cacheUsingTable = delegate.getTableRef().getTable();
- ConnectionQueryServices services = delegate.getContext().getConnection().getQueryServices();
- List locations = services.getAllTableRegions(
- cacheUsingTable.getPhysicalName().getBytes(),
- delegate.getContext().getStatement().getQueryTimeoutInMillis());
- int nRegions = locations.size();
- Set servers = new HashSet<>(nRegions);
- cacheUsingTableMap.put(Bytes.mapKey(cacheId), cacheUsingTable);
- return new ServerCache(cacheId, servers, new ImmutableBytesWritable(
- new byte[]{}), services, false);
+ public boolean addServer(HRegionLocation loc) {
+ if (this.servers.containsKey(loc)) {
+ return false;
+ } else {
+ this.servers.put(loc, EnvironmentEdgeManager.currentTimeMillis());
+ return true;
+ }
}
- public ServerCache addServerCache(
- ScanRanges keyRanges, final ImmutableBytesWritable cachePtr, final byte[] txState,
- final ServerCacheFactory cacheFactory, final PTable cacheUsingTable)
- throws SQLException {
- return addServerCache(keyRanges, cachePtr, txState, cacheFactory, cacheUsingTable, false);
+ public boolean isExpired(HRegionLocation loc) {
+ if (this.servers.containsKey(loc)) {
+ Long time = this.servers.get(loc);
+      if (EnvironmentEdgeManager.currentTimeMillis() - time > maxServerCacheTTL) {
+        // cache was sent more than maxTTL ms ago, expecting that it's expired
+        return true;
+      }
+ } else {
+ return false; // should be on server yet.
+ }
+ return false; // Unknown region location. Need to send the cache.
}
- public ServerCache addServerCache(
- ScanRanges keyRanges, final ImmutableBytesWritable cachePtr, final byte[] txState,
- final ServerCacheFactory cacheFactory, final PTable cacheUsingTable,
- boolean storeCacheOnClient) throws SQLException {
- final byte[] cacheId = ServerCacheClient.generateId();
- return addServerCache(keyRanges, cacheId, cachePtr, txState, cacheFactory,
- cacheUsingTable, false, storeCacheOnClient);
+ /**
+ * Call to free up cache on region servers when no longer needed
+ */
+ @Override
+ public void close() throws SQLException {
+ try {
+ removeServerCache(this, servers.keySet());
+ } finally {
+ cachePtr = null;
+ if (chunk != null) {
+ chunk.close();
+ }
+ if (outputFile != null) {
+ outputFile.delete();
+ }
+ }
}
+ }
- public ServerCache addServerCache(
- ScanRanges keyRanges, final byte[] cacheId, final ImmutableBytesWritable cachePtr,
- final byte[] txState, final ServerCacheFactory cacheFactory,
- final PTable cacheUsingTable, final boolean usePersistentCache,
- boolean storeCacheOnClient) throws SQLException {
- ConnectionQueryServices services = connection.getQueryServices();
- List closeables = new ArrayList();
- ServerCache hashCacheSpec = null;
- SQLException firstException = null;
- /**
- * Execute EndPoint in parallel on each server to send compressed hash cache
- */
- // TODO: generalize and package as a per region server EndPoint caller
- // (ideally this would be functionality provided by the coprocessor framework)
- boolean success = false;
- ExecutorService executor = services.getExecutor();
- List> futures = Collections.emptyList();
- try {
- int queryTimeout = connection.getQueryServices().getProps()
- .getInt(QueryServices.THREAD_TIMEOUT_MS_ATTRIB,
- QueryServicesOptions.DEFAULT_THREAD_TIMEOUT_MS);
- List locations =
- services.getAllTableRegions(cacheUsingTable.getPhysicalName().getBytes(),
- queryTimeout);
- int nRegions = locations.size();
- // Size these based on worst case
- futures = new ArrayList>(nRegions);
- Set servers = new HashSet(nRegions);
- for (HRegionLocation entry : locations) {
- // Keep track of servers we've sent to and only send once
- byte[] regionStartKey = entry.getRegion().getStartKey();
- byte[] regionEndKey = entry.getRegion().getEndKey();
- if ( ! servers.contains(entry) &&
- keyRanges.intersectRegion(regionStartKey, regionEndKey,
- cacheUsingTable.getIndexType() == IndexType.LOCAL)) {
- // Call RPC once per server
- servers.add(entry);
- if (LOGGER.isDebugEnabled()) {
- LOGGER.debug(addCustomAnnotations(
- "Adding cache entry to be sent for " + entry, connection));
- }
- final byte[] key = getKeyInRegion(entry.getRegion().getStartKey());
- final Table htable = services.getTable(cacheUsingTable.getPhysicalName().getBytes());
- closeables.add(htable);
- futures.add(executor.submit(new JobCallable() {
-
- @Override
- public Boolean call() throws Exception {
- return addServerCache(htable, key, cacheUsingTable, cacheId, cachePtr, cacheFactory, txState, usePersistentCache);
- }
-
- /**
- * Defines the grouping for round robin behavior. All threads spawned to process
- * this scan will be grouped together and time sliced with other simultaneously
- * executing parallel scans.
- */
- @Override
- public Object getJobId() {
- return ServerCacheClient.this;
- }
-
- @Override
- public TaskExecutionMetricsHolder getTaskExecutionMetric() {
- return NO_OP_INSTANCE;
- }
- }));
- } else {
- if (LOGGER.isDebugEnabled()) {
- LOGGER.debug(addCustomAnnotations(
- "NOT adding cache entry to be sent for " + entry +
- " since one already exists for that entry", connection));
- }
- }
+ public ServerCache createServerCache(byte[] cacheId, QueryPlan delegate)
+ throws SQLException, IOException {
+ PTable cacheUsingTable = delegate.getTableRef().getTable();
+ ConnectionQueryServices services = delegate.getContext().getConnection().getQueryServices();
+    List<HRegionLocation> locations =
+ services.getAllTableRegions(cacheUsingTable.getPhysicalName().getBytes(),
+ delegate.getContext().getStatement().getQueryTimeoutInMillis());
+ int nRegions = locations.size();
+    Set<HRegionLocation> servers = new HashSet<>(nRegions);
+ cacheUsingTableMap.put(Bytes.mapKey(cacheId), cacheUsingTable);
+ return new ServerCache(cacheId, servers, new ImmutableBytesWritable(new byte[] {}), services,
+ false);
+ }
+
+ public ServerCache addServerCache(ScanRanges keyRanges, final ImmutableBytesWritable cachePtr,
+ final byte[] txState, final ServerCacheFactory cacheFactory, final PTable cacheUsingTable)
+ throws SQLException {
+ return addServerCache(keyRanges, cachePtr, txState, cacheFactory, cacheUsingTable, false);
+ }
+
+ public ServerCache addServerCache(ScanRanges keyRanges, final ImmutableBytesWritable cachePtr,
+ final byte[] txState, final ServerCacheFactory cacheFactory, final PTable cacheUsingTable,
+ boolean storeCacheOnClient) throws SQLException {
+ final byte[] cacheId = ServerCacheClient.generateId();
+ return addServerCache(keyRanges, cacheId, cachePtr, txState, cacheFactory, cacheUsingTable,
+ false, storeCacheOnClient);
+ }
+
+ public ServerCache addServerCache(ScanRanges keyRanges, final byte[] cacheId,
+ final ImmutableBytesWritable cachePtr, final byte[] txState,
+ final ServerCacheFactory cacheFactory, final PTable cacheUsingTable,
+ final boolean usePersistentCache, boolean storeCacheOnClient) throws SQLException {
+ ConnectionQueryServices services = connection.getQueryServices();
+    List<Closeable> closeables = new ArrayList<Closeable>();
+ ServerCache hashCacheSpec = null;
+ SQLException firstException = null;
+ /**
+ * Execute EndPoint in parallel on each server to send compressed hash cache
+ */
+ // TODO: generalize and package as a per region server EndPoint caller
+ // (ideally this would be functionality provided by the coprocessor framework)
+ boolean success = false;
+ ExecutorService executor = services.getExecutor();
+    List<Future<Boolean>> futures = Collections.emptyList();
+ try {
+ int queryTimeout = connection.getQueryServices().getProps().getInt(
+ QueryServices.THREAD_TIMEOUT_MS_ATTRIB, QueryServicesOptions.DEFAULT_THREAD_TIMEOUT_MS);
+      List<HRegionLocation> locations =
+ services.getAllTableRegions(cacheUsingTable.getPhysicalName().getBytes(), queryTimeout);
+ int nRegions = locations.size();
+ // Size these based on worst case
+      futures = new ArrayList<Future<Boolean>>(nRegions);
+      Set<HRegionLocation> servers = new HashSet<HRegionLocation>(nRegions);
+ for (HRegionLocation entry : locations) {
+ // Keep track of servers we've sent to and only send once
+ byte[] regionStartKey = entry.getRegion().getStartKey();
+ byte[] regionEndKey = entry.getRegion().getEndKey();
+ if (
+ !servers.contains(entry) && keyRanges.intersectRegion(regionStartKey, regionEndKey,
+ cacheUsingTable.getIndexType() == IndexType.LOCAL)
+ ) {
+ // Call RPC once per server
+ servers.add(entry);
+ if (LOGGER.isDebugEnabled()) {
+ LOGGER.debug(
+ addCustomAnnotations("Adding cache entry to be sent for " + entry, connection));
+ }
+ final byte[] key = getKeyInRegion(entry.getRegion().getStartKey());
+ final Table htable = services.getTable(cacheUsingTable.getPhysicalName().getBytes());
+ closeables.add(htable);
+          futures.add(executor.submit(new JobCallable<Boolean>() {
+
+ @Override
+ public Boolean call() throws Exception {
+ return addServerCache(htable, key, cacheUsingTable, cacheId, cachePtr, cacheFactory,
+ txState, usePersistentCache);
+ }
+
+ /**
+ * Defines the grouping for round robin behavior. All threads spawned to process this
+ * scan will be grouped together and time sliced with other simultaneously executing
+ * parallel scans.
+ */
+ @Override
+ public Object getJobId() {
+ return ServerCacheClient.this;
}
- hashCacheSpec = new ServerCache(cacheId,servers,cachePtr, services, storeCacheOnClient);
- // Execute in parallel
- int timeoutMs = services.getProps().getInt(QueryServices.THREAD_TIMEOUT_MS_ATTRIB, QueryServicesOptions.DEFAULT_THREAD_TIMEOUT_MS);
- for (Future future : futures) {
- future.get(timeoutMs, TimeUnit.MILLISECONDS);
+ @Override
+ public TaskExecutionMetricsHolder getTaskExecutionMetric() {
+ return NO_OP_INSTANCE;
}
+ }));
+ } else {
+ if (LOGGER.isDebugEnabled()) {
+ LOGGER.debug(addCustomAnnotations("NOT adding cache entry to be sent for " + entry
+ + " since one already exists for that entry", connection));
+ }
+ }
+ }
+
+ hashCacheSpec = new ServerCache(cacheId, servers, cachePtr, services, storeCacheOnClient);
+ // Execute in parallel
+ int timeoutMs = services.getProps().getInt(QueryServices.THREAD_TIMEOUT_MS_ATTRIB,
+ QueryServicesOptions.DEFAULT_THREAD_TIMEOUT_MS);
+ for (Future future : futures) {
+ future.get(timeoutMs, TimeUnit.MILLISECONDS);
+ }
- cacheUsingTableMap.put(Bytes.mapKey(cacheId), cacheUsingTable);
- success = true;
- } catch (SQLException e) {
- firstException = e;
- } catch (Exception e) {
+ cacheUsingTableMap.put(Bytes.mapKey(cacheId), cacheUsingTable);
+ success = true;
+ } catch (SQLException e) {
+ firstException = e;
+ } catch (Exception e) {
+ firstException = new SQLException(e);
+ } finally {
+ try {
+ if (!success) {
+ if (hashCacheSpec != null) {
+ SQLCloseables.closeAllQuietly(Collections.singletonList(hashCacheSpec));
+ }
+ SQLCloseables.closeAllQuietly(Collections.singletonList(hashCacheSpec));
+ for (Future future : futures) {
+ future.cancel(true);
+ }
+ }
+ } finally {
+ try {
+ Closeables.closeAll(closeables);
+ } catch (IOException e) {
+ if (firstException == null) {
firstException = new SQLException(e);
+ }
} finally {
- try {
- if (!success) {
- if (hashCacheSpec != null) {
- SQLCloseables.closeAllQuietly(Collections.singletonList(hashCacheSpec));
- }
- SQLCloseables.closeAllQuietly(Collections.singletonList(hashCacheSpec));
- for (Future future : futures) {
- future.cancel(true);
- }
- }
- } finally {
- try {
- Closeables.closeAll(closeables);
- } catch (IOException e) {
- if (firstException == null) {
- firstException = new SQLException(e);
- }
- } finally {
- if (firstException != null) {
- throw firstException;
- }
- }
- }
+ if (firstException != null) {
+ throw firstException;
+ }
}
- if (LOGGER.isDebugEnabled()) {
- LOGGER.debug(addCustomAnnotations("Cache " + cacheId +
- " successfully added to servers.", connection));
- }
- return hashCacheSpec;
+ }
}
-
- /**
- * Remove the cached table from all region servers
- * @throws SQLException
- * @throws IllegalStateException if hashed table cannot be removed on any region server on which it was added
- */
- private void removeServerCache(final ServerCache cache, Set remainingOnServers) throws SQLException {
- Table iterateOverTable = null;
- final byte[] cacheId = cache.getId();
- try {
- ConnectionQueryServices services = connection.getQueryServices();
- Throwable lastThrowable = null;
- final PTable cacheUsingTable = cacheUsingTableMap.get(Bytes.mapKey(cacheId));
- byte[] tableName = cacheUsingTable.getPhysicalName().getBytes();
- iterateOverTable = services.getTable(tableName);
+ if (LOGGER.isDebugEnabled()) {
+ LOGGER.debug(
+ addCustomAnnotations("Cache " + cacheId + " successfully added to servers.", connection));
+ }
+ return hashCacheSpec;
+ }
- int queryTimeout = connection.getQueryServices().getProps()
- .getInt(QueryServices.THREAD_TIMEOUT_MS_ATTRIB,
- QueryServicesOptions.DEFAULT_THREAD_TIMEOUT_MS);
- List locations = services.getAllTableRegions(tableName, queryTimeout);
+ /**
+ * Remove the cached table from all region servers
+ * @throws IllegalStateException if hashed table cannot be removed on any region server on which
+ * it was added
+ */
+  private void removeServerCache(final ServerCache cache, Set<HRegionLocation> remainingOnServers)
+ throws SQLException {
+ Table iterateOverTable = null;
+ final byte[] cacheId = cache.getId();
+ try {
+ ConnectionQueryServices services = connection.getQueryServices();
+ Throwable lastThrowable = null;
+ final PTable cacheUsingTable = cacheUsingTableMap.get(Bytes.mapKey(cacheId));
+ byte[] tableName = cacheUsingTable.getPhysicalName().getBytes();
+ iterateOverTable = services.getTable(tableName);
- /**
- * Allow for the possibility that the region we based where to send our cache has split and been relocated
- * to another region server *after* we sent it, but before we removed it. To accommodate this, we iterate
- * through the current metadata boundaries and remove the cache once for each server that we originally sent
- * to.
- */
- if (LOGGER.isDebugEnabled()) {
- LOGGER.debug(addCustomAnnotations(
- "Removing Cache " + cacheId + " from servers.", connection));
- }
- for (HRegionLocation entry : locations) {
- // Call once per server
- if (remainingOnServers.contains(entry)) {
+ int queryTimeout = connection.getQueryServices().getProps().getInt(
+ QueryServices.THREAD_TIMEOUT_MS_ATTRIB, QueryServicesOptions.DEFAULT_THREAD_TIMEOUT_MS);
+      List<HRegionLocation> locations = services.getAllTableRegions(tableName, queryTimeout);
+
+ /**
+ * Allow for the possibility that the region we based where to send our cache has split and
+ * been relocated to another region server *after* we sent it, but before we removed it. To
+ * accommodate this, we iterate through the current metadata boundaries and remove the cache
+ * once for each server that we originally sent to.
+ */
+ if (LOGGER.isDebugEnabled()) {
+ LOGGER
+ .debug(addCustomAnnotations("Removing Cache " + cacheId + " from servers.", connection));
+ }
+ for (HRegionLocation entry : locations) {
+ // Call once per server
+ if (remainingOnServers.contains(entry)) {
+ try {
+ byte[] key = getKeyInRegion(entry.getRegion().getStartKey());
+ iterateOverTable.coprocessorService(ServerCachingService.class, key, key,
+            new Batch.Call<ServerCachingService, RemoveServerCacheResponse>() {
+ @Override
+ public RemoveServerCacheResponse call(ServerCachingService instance)
+ throws IOException {
+ ServerRpcController controller = new ServerRpcController();
+                BlockingRpcCallback<RemoveServerCacheResponse> rpcCallback =
+                  new BlockingRpcCallback<RemoveServerCacheResponse>();
+ RemoveServerCacheRequest.Builder builder = RemoveServerCacheRequest.newBuilder();
+ final byte[] tenantIdBytes;
+ if (cacheUsingTable.isMultiTenant()) {
try {
- byte[] key = getKeyInRegion(entry.getRegion().getStartKey());
- iterateOverTable.coprocessorService(ServerCachingService.class, key, key,
- new Batch.Call() {
- @Override
- public RemoveServerCacheResponse call(ServerCachingService instance)
- throws IOException {
- ServerRpcController controller = new ServerRpcController();
- BlockingRpcCallback rpcCallback = new BlockingRpcCallback();
- RemoveServerCacheRequest.Builder builder = RemoveServerCacheRequest
- .newBuilder();
- final byte[] tenantIdBytes;
- if (cacheUsingTable.isMultiTenant()) {
- try {
- tenantIdBytes = connection.getTenantId() == null ? null
- : ScanUtil.getTenantIdBytes(cacheUsingTable.getRowKeySchema(),
- cacheUsingTable.getBucketNum() != null,
- connection.getTenantId(),
- cacheUsingTable.getViewIndexId() != null);
- } catch (SQLException e) {
- throw new IOException(e);
- }
- } else {
- tenantIdBytes = connection.getTenantId() == null ? null
- : connection.getTenantId().getBytes();
- }
- if (tenantIdBytes != null) {
- builder.setTenantId(ByteStringer.wrap(tenantIdBytes));
- }
- builder.setCacheId(ByteStringer.wrap(cacheId));
- instance.removeServerCache(controller, builder.build(), rpcCallback);
- if (controller.getFailedOn() != null) { throw controller.getFailedOn(); }
- return rpcCallback.get();
- }
- });
- remainingOnServers.remove(entry);
- } catch (Throwable t) {
- lastThrowable = t;
- LOGGER.error(addCustomAnnotations(
- "Error trying to remove hash cache for " + entry,
- connection), t);
+ tenantIdBytes = connection.getTenantId() == null
+ ? null
+ : ScanUtil.getTenantIdBytes(cacheUsingTable.getRowKeySchema(),
+ cacheUsingTable.getBucketNum() != null, connection.getTenantId(),
+ cacheUsingTable.getViewIndexId() != null);
+ } catch (SQLException e) {
+ throw new IOException(e);
}
+ } else {
+ tenantIdBytes =
+ connection.getTenantId() == null ? null : connection.getTenantId().getBytes();
+ }
+ if (tenantIdBytes != null) {
+ builder.setTenantId(ByteStringer.wrap(tenantIdBytes));
+ }
+ builder.setCacheId(ByteStringer.wrap(cacheId));
+ instance.removeServerCache(controller, builder.build(), rpcCallback);
+ if (controller.getFailedOn() != null) {
+ throw controller.getFailedOn();
+ }
+ return rpcCallback.get();
}
- }
- if (!remainingOnServers.isEmpty()) {
- LOGGER.warn(addCustomAnnotations("Unable to remove hash cache for "
- + remainingOnServers, connection),
- lastThrowable);
- }
- } finally {
- cacheUsingTableMap.remove(Bytes.mapKey(cacheId));
- Closeables.closeQuietly(iterateOverTable);
+ });
+ remainingOnServers.remove(entry);
+ } catch (Throwable t) {
+ lastThrowable = t;
+ LOGGER.error(
+ addCustomAnnotations("Error trying to remove hash cache for " + entry, connection),
+ t);
+ }
}
+ }
+ if (!remainingOnServers.isEmpty()) {
+ LOGGER.warn(
+ addCustomAnnotations("Unable to remove hash cache for " + remainingOnServers, connection),
+ lastThrowable);
+ }
+ } finally {
+ cacheUsingTableMap.remove(Bytes.mapKey(cacheId));
+ Closeables.closeQuietly(iterateOverTable);
}
+ }
- /**
- * Create an ID to keep the cached information across other operations independent.
- * Using simple long random number, since the length of time we need this to be unique
- * is very limited.
- */
- public static byte[] generateId() {
- long rand = RANDOM.nextLong();
- return Bytes.toBytes(rand);
+ /**
+ * Create an ID to keep the cached information across other operations independent. Using simple
+ * long random number, since the length of time we need this to be unique is very limited.
+ */
+ public static byte[] generateId() {
+ long rand = RANDOM.nextLong();
+ return Bytes.toBytes(rand);
+ }
+
+ public static String idToString(byte[] uuid) {
+ assert (uuid.length == Bytes.SIZEOF_LONG);
+ return Long.toString(Bytes.toLong(uuid));
+ }
+
+ private static byte[] getKeyInRegion(byte[] regionStartKey) {
+ assert (regionStartKey != null);
+ if (Bytes.equals(regionStartKey, HConstants.EMPTY_START_ROW)) {
+ return KEY_IN_FIRST_REGION;
}
-
- public static String idToString(byte[] uuid) {
- assert(uuid.length == Bytes.SIZEOF_LONG);
- return Long.toString(Bytes.toLong(uuid));
+ return regionStartKey;
+ }
+
+ public boolean addServerCache(byte[] startkeyOfRegion, ServerCache cache,
+ HashCacheFactory cacheFactory, byte[] txState, PTable pTable) throws Exception {
+ Table table = null;
+ boolean success = true;
+ byte[] cacheId = cache.getId();
+ try {
+ ConnectionQueryServices services = connection.getQueryServices();
+
+ byte[] tableName = pTable.getPhysicalName().getBytes();
+ table = services.getTable(tableName);
+ HRegionLocation tableRegionLocation =
+ services.getTableRegionLocation(tableName, startkeyOfRegion);
+ if (cache.isExpired(tableRegionLocation)) {
+ return false;
+ }
+ if (
+ cache.addServer(tableRegionLocation)
+ || services.getProps().getBoolean(HASH_JOIN_SERVER_CACHE_RESEND_PER_SERVER, false)
+ ) {
+ success = addServerCache(table, startkeyOfRegion, pTable, cacheId, cache.getCachePtr(),
+ cacheFactory, txState, false);
+ }
+ return success;
+ } finally {
+ Closeables.closeQuietly(table);
}
+ }
- private static byte[] getKeyInRegion(byte[] regionStartKey) {
- assert (regionStartKey != null);
- if (Bytes.equals(regionStartKey, HConstants.EMPTY_START_ROW)) {
- return KEY_IN_FIRST_REGION;
- }
- return regionStartKey;
+ public boolean addServerCache(Table htable, byte[] key, final PTable cacheUsingTable,
+ final byte[] cacheId, final ImmutableBytesWritable cachePtr,
+ final ServerCacheFactory cacheFactory, final byte[] txState, final boolean usePersistentCache)
+ throws Exception {
+ byte[] keyInRegion = getKeyInRegion(key);
+    final Map<byte[], AddServerCacheResponse> results;
+
+ AddServerCacheRequest.Builder builder = AddServerCacheRequest.newBuilder();
+ final byte[] tenantIdBytes;
+ if (cacheUsingTable.isMultiTenant()) {
+ try {
+ tenantIdBytes = connection.getTenantId() == null
+ ? null
+ : ScanUtil.getTenantIdBytes(cacheUsingTable.getRowKeySchema(),
+ cacheUsingTable.getBucketNum() != null, connection.getTenantId(),
+ cacheUsingTable.getViewIndexId() != null);
+ } catch (SQLException e) {
+ throw new IOException(e);
+ }
+ } else {
+ tenantIdBytes = connection.getTenantId() == null ? null : connection.getTenantId().getBytes();
+ }
+ if (tenantIdBytes != null) {
+ builder.setTenantId(ByteStringer.wrap(tenantIdBytes));
}
+ builder.setCacheId(ByteStringer.wrap(cacheId));
+ builder.setUsePersistentCache(usePersistentCache);
+ builder.setCachePtr(org.apache.phoenix.protobuf.ProtobufUtil.toProto(cachePtr));
+ builder.setHasProtoBufIndexMaintainer(true);
+ ServerCacheFactoryProtos.ServerCacheFactory.Builder svrCacheFactoryBuider =
+ ServerCacheFactoryProtos.ServerCacheFactory.newBuilder();
+ svrCacheFactoryBuider.setClassName(cacheFactory.getClass().getName());
+ builder.setCacheFactory(svrCacheFactoryBuider.build());
+ builder.setTxState(ByteStringer.wrap(txState));
+ builder.setClientVersion(MetaDataProtocol.PHOENIX_VERSION);
+ final AddServerCacheRequest request = builder.build();
- public boolean addServerCache(byte[] startkeyOfRegion, ServerCache cache, HashCacheFactory cacheFactory,
- byte[] txState, PTable pTable) throws Exception {
- Table table = null;
- boolean success = true;
- byte[] cacheId = cache.getId();
- try {
- ConnectionQueryServices services = connection.getQueryServices();
-
- byte[] tableName = pTable.getPhysicalName().getBytes();
- table = services.getTable(tableName);
- HRegionLocation tableRegionLocation = services.getTableRegionLocation(tableName, startkeyOfRegion);
- if(cache.isExpired(tableRegionLocation)) {
- return false;
+ try {
+ results = htable.coprocessorService(ServerCachingService.class, keyInRegion, keyInRegion,
+        new Batch.Call<ServerCachingService, AddServerCacheResponse>() {
+ @Override
+ public AddServerCacheResponse call(ServerCachingService instance) throws IOException {
+ ServerRpcController controller = new ServerRpcController();
+            BlockingRpcCallback<AddServerCacheResponse> rpcCallback =
+              new BlockingRpcCallback<AddServerCacheResponse>();
+ instance.addServerCache(controller, request, rpcCallback);
+ if (controller.getFailedOn() != null) {
+ throw controller.getFailedOn();
}
- if (cache.addServer(tableRegionLocation) || services.getProps().getBoolean(HASH_JOIN_SERVER_CACHE_RESEND_PER_SERVER,false)) {
- success = addServerCache(table, startkeyOfRegion, pTable, cacheId, cache.getCachePtr(), cacheFactory,
- txState, false);
- }
- return success;
- } finally {
- Closeables.closeQuietly(table);
- }
+ return rpcCallback.get();
+ }
+ });
+ } catch (Throwable t) {
+ throw new Exception(t);
}
-
- public boolean addServerCache(Table htable, byte[] key, final PTable cacheUsingTable, final byte[] cacheId,
- final ImmutableBytesWritable cachePtr, final ServerCacheFactory cacheFactory, final byte[] txState, final boolean usePersistentCache)
- throws Exception {
- byte[] keyInRegion = getKeyInRegion(key);
- final Map results;
-
- AddServerCacheRequest.Builder builder = AddServerCacheRequest.newBuilder();
- final byte[] tenantIdBytes;
- if (cacheUsingTable.isMultiTenant()) {
- try {
- tenantIdBytes = connection.getTenantId() == null ? null
- : ScanUtil.getTenantIdBytes(cacheUsingTable.getRowKeySchema(),
- cacheUsingTable.getBucketNum() != null, connection.getTenantId(),
- cacheUsingTable.getViewIndexId() != null);
- } catch (SQLException e) {
- throw new IOException(e);
- }
- } else {
- tenantIdBytes = connection.getTenantId() == null ? null
- : connection.getTenantId().getBytes();
- }
- if (tenantIdBytes != null) {
- builder.setTenantId(ByteStringer.wrap(tenantIdBytes));
- }
- builder.setCacheId(ByteStringer.wrap(cacheId));
- builder.setUsePersistentCache(usePersistentCache);
- builder.setCachePtr(org.apache.phoenix.protobuf.ProtobufUtil.toProto(cachePtr));
- builder.setHasProtoBufIndexMaintainer(true);
- ServerCacheFactoryProtos.ServerCacheFactory.Builder svrCacheFactoryBuider = ServerCacheFactoryProtos.ServerCacheFactory
- .newBuilder();
- svrCacheFactoryBuider.setClassName(cacheFactory.getClass().getName());
- builder.setCacheFactory(svrCacheFactoryBuider.build());
- builder.setTxState(ByteStringer.wrap(txState));
- builder.setClientVersion(MetaDataProtocol.PHOENIX_VERSION);
- final AddServerCacheRequest request = builder.build();
-
- try {
- results = htable.coprocessorService(ServerCachingService.class, keyInRegion, keyInRegion,
- new Batch.Call() {
- @Override
- public AddServerCacheResponse call(ServerCachingService instance) throws IOException {
- ServerRpcController controller = new ServerRpcController();
- BlockingRpcCallback rpcCallback = new BlockingRpcCallback();
- instance.addServerCache(controller, request, rpcCallback);
- if (controller.getFailedOn() != null) { throw controller.getFailedOn(); }
- return rpcCallback.get();
- }
- });
- } catch (Throwable t) {
- throw new Exception(t);
- }
- if (results != null && results.size() == 1) { return results.values().iterator().next().getReturn(); }
- return false;
+ if (results != null && results.size() == 1) {
+ return results.values().iterator().next().getReturn();
}
-
+ return false;
+ }
+
}
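The full addServerCache() path above needs a live PhoenixConnection and region servers, so it is not easily shown in isolation, but the cache-id handling is self-contained. A minimal sketch of the static helpers (the class name is illustrative):

```java
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.cache.ServerCacheClient;

public class ServerCacheIdExample {
  public static void main(String[] args) {
    // Cache ids are simply random longs serialized with HBase's Bytes utility,
    // which is what generateId() and idToString() above implement.
    byte[] cacheId = ServerCacheClient.generateId();
    System.out.println("id as long   : " + Bytes.toLong(cacheId));
    System.out.println("id as string : " + ServerCacheClient.idToString(cacheId));
  }
}
```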
diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/cache/ServerMetadataCache.java b/phoenix-core-client/src/main/java/org/apache/phoenix/cache/ServerMetadataCache.java
index f251dc7123c..7cf54a4d009 100644
--- a/phoenix-core-client/src/main/java/org/apache/phoenix/cache/ServerMetadataCache.java
+++ b/phoenix-core-client/src/main/java/org/apache/phoenix/cache/ServerMetadataCache.java
@@ -1,12 +1,13 @@
/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to you under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
*
- * http://www.apache.org/licenses/LICENSE-2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
@@ -22,7 +23,8 @@
* Interface for server side metadata cache hosted on each region server.
*/
public interface ServerMetadataCache {
- long getLastDDLTimestampForTable(byte[] tenantID, byte[] schemaName, byte[] tableName)
- throws SQLException;
- void invalidate(byte[] tenantID, byte[] schemaName, byte[] tableName);
+ long getLastDDLTimestampForTable(byte[] tenantID, byte[] schemaName, byte[] tableName)
+ throws SQLException;
+
+ void invalidate(byte[] tenantID, byte[] schemaName, byte[] tableName);
}
diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/cache/ServerMetadataCacheImpl.java b/phoenix-core-client/src/main/java/org/apache/phoenix/cache/ServerMetadataCacheImpl.java
index 5f9aa104556..a2beb6d8e5d 100644
--- a/phoenix-core-client/src/main/java/org/apache/phoenix/cache/ServerMetadataCacheImpl.java
+++ b/phoenix-core-client/src/main/java/org/apache/phoenix/cache/ServerMetadataCacheImpl.java
@@ -7,7 +7,7 @@
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
- * http://www.apache.org/licenses/LICENSE-2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
@@ -17,10 +17,11 @@
*/
package org.apache.phoenix.cache;
+import static org.apache.phoenix.util.PhoenixRuntime.TENANT_ID_ATTRIB;
+
import java.sql.Connection;
import java.sql.SQLException;
import java.util.Properties;
-import java.util.concurrent.TimeUnit;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
@@ -33,124 +34,118 @@
import org.apache.phoenix.thirdparty.com.google.common.cache.Cache;
import org.apache.phoenix.thirdparty.com.google.common.cache.CacheBuilder;
import org.apache.phoenix.thirdparty.com.google.common.cache.RemovalListener;
-import org.apache.phoenix.util.PhoenixRuntime;
import org.apache.phoenix.util.QueryUtil;
import org.apache.phoenix.util.SchemaUtil;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import static org.apache.phoenix.util.PhoenixRuntime.TENANT_ID_ATTRIB;
/**
* This manages the cache for all the objects(data table, views, indexes) on each region server.
* Currently, it only stores LAST_DDL_TIMESTAMP in the cache.
*/
public class ServerMetadataCacheImpl implements ServerMetadataCache {
- protected Configuration conf;
-    // key is the combination of <tenantID, schema name, table name>, value is the lastDDLTimestamp
-    protected final Cache<ImmutableBytesPtr, Long> lastDDLTimestampMap;
- private static final Logger LOGGER = LoggerFactory.getLogger(ServerMetadataCacheImpl.class);
- private static final String PHOENIX_COPROC_REGIONSERVER_CACHE_SIZE
- = "phoenix.coprocessor.regionserver.cache.size";
- private static final long DEFAULT_PHOENIX_COPROC_REGIONSERVER_CACHE_SIZE = 10000L;
- private static volatile ServerMetadataCacheImpl cacheInstance;
- private MetricsMetadataCachingSource metricsSource;
+ protected Configuration conf;
+  // key is the combination of <tenantID, schema name, table name>, value is the lastDDLTimestamp
+  protected final Cache<ImmutableBytesPtr, Long> lastDDLTimestampMap;
+ private static final Logger LOGGER = LoggerFactory.getLogger(ServerMetadataCacheImpl.class);
+ private static final String PHOENIX_COPROC_REGIONSERVER_CACHE_SIZE =
+ "phoenix.coprocessor.regionserver.cache.size";
+ private static final long DEFAULT_PHOENIX_COPROC_REGIONSERVER_CACHE_SIZE = 10000L;
+ private static volatile ServerMetadataCacheImpl cacheInstance;
+ private MetricsMetadataCachingSource metricsSource;
- /**
- * Creates/gets an instance of ServerMetadataCache.
- *
- * @param conf configuration
- * @return cache
- */
- public static ServerMetadataCacheImpl getInstance(Configuration conf) {
- ServerMetadataCacheImpl result = cacheInstance;
+ /**
+ * Creates/gets an instance of ServerMetadataCache.
+ * @param conf configuration
+ */
+ public static ServerMetadataCacheImpl getInstance(Configuration conf) {
+ ServerMetadataCacheImpl result = cacheInstance;
+ if (result == null) {
+ synchronized (ServerMetadataCacheImpl.class) {
+ result = cacheInstance;
if (result == null) {
- synchronized (ServerMetadataCacheImpl.class) {
- result = cacheInstance;
- if (result == null) {
- cacheInstance = result = new ServerMetadataCacheImpl(conf);
- }
- }
+ cacheInstance = result = new ServerMetadataCacheImpl(conf);
}
- return result;
+ }
}
+ return result;
+ }
- public ServerMetadataCacheImpl(Configuration conf) {
- this.conf = HBaseConfiguration.create(conf);
- this.metricsSource = MetricsPhoenixCoprocessorSourceFactory
- .getInstance().getMetadataCachingSource();
- long maxSize = conf.getLong(PHOENIX_COPROC_REGIONSERVER_CACHE_SIZE,
- DEFAULT_PHOENIX_COPROC_REGIONSERVER_CACHE_SIZE);
- lastDDLTimestampMap = CacheBuilder.newBuilder()
- .removalListener((RemovalListener) notification -> {
- String key = notification.getKey().toString();
- LOGGER.debug("Expiring " + key + " because of "
- + notification.getCause().name());
- })
- // maximum number of entries this cache can handle.
- .maximumSize(maxSize)
- .build();
- }
+ public ServerMetadataCacheImpl(Configuration conf) {
+ this.conf = HBaseConfiguration.create(conf);
+ this.metricsSource =
+ MetricsPhoenixCoprocessorSourceFactory.getInstance().getMetadataCachingSource();
+ long maxSize = conf.getLong(PHOENIX_COPROC_REGIONSERVER_CACHE_SIZE,
+ DEFAULT_PHOENIX_COPROC_REGIONSERVER_CACHE_SIZE);
+ lastDDLTimestampMap = CacheBuilder.newBuilder()
+      .removalListener((RemovalListener<ImmutableBytesPtr, Long>) notification -> {
+ String key = notification.getKey().toString();
+ LOGGER.debug("Expiring " + key + " because of " + notification.getCause().name());
+ })
+ // maximum number of entries this cache can handle.
+ .maximumSize(maxSize).build();
+ }
- /**
- * Returns the last DDL timestamp from the table.
- * If not found in cache, then query SYSCAT regionserver.
- * @param tenantID tenant id
- * @param schemaName schema name
- * @param tableName table name
- * @return last DDL timestamp
- * @throws Exception
- */
- public long getLastDDLTimestampForTable(byte[] tenantID, byte[] schemaName, byte[] tableName)
- throws SQLException {
- byte[] tableKey = SchemaUtil.getTableKey(tenantID, schemaName, tableName);
- ImmutableBytesPtr tableKeyPtr = new ImmutableBytesPtr(tableKey);
- // Lookup in cache if present.
- Long lastDDLTimestamp = lastDDLTimestampMap.getIfPresent(tableKeyPtr);
- if (lastDDLTimestamp != null) {
- metricsSource.incrementRegionServerMetadataCacheHitCount();
- LOGGER.trace("Retrieving last ddl timestamp value from cache for " + "schema: {}, " +
- "table: {}", Bytes.toString(schemaName), Bytes.toString(tableName));
- return lastDDLTimestamp;
- }
- metricsSource.incrementRegionServerMetadataCacheMissCount();
- PTable table;
- String tenantIDStr = Bytes.toString(tenantID);
- if (tenantIDStr == null || tenantIDStr.isEmpty()) {
- tenantIDStr = null;
- }
- Properties properties = new Properties();
- if (tenantIDStr != null) {
- properties.setProperty(TENANT_ID_ATTRIB, tenantIDStr);
- }
- try (Connection connection = getConnection(properties)) {
- // Using PhoenixConnection#getTableFromServerNoCache to completely bypass CQSI cache.
- table = connection.unwrap(PhoenixConnection.class)
- .getTableFromServerNoCache(schemaName, tableName);
- // TODO PhoenixConnection#getTableFromServerNoCache can throw TableNotFoundException.
- // In that case, do we want to throw non retryable exception back to the client?
- // Update cache with the latest DDL timestamp from SYSCAT server.
- lastDDLTimestampMap.put(tableKeyPtr, table.getLastDDLTimestamp());
- }
- return table.getLastDDLTimestamp();
+ /**
+ * Returns the last DDL timestamp from the table. If not found in cache, then query SYSCAT
+ * regionserver.
+ * @param tenantID tenant id
+ * @param schemaName schema name
+ * @param tableName table name
+ * @return last DDL timestamp
+ */
+ public long getLastDDLTimestampForTable(byte[] tenantID, byte[] schemaName, byte[] tableName)
+ throws SQLException {
+ byte[] tableKey = SchemaUtil.getTableKey(tenantID, schemaName, tableName);
+ ImmutableBytesPtr tableKeyPtr = new ImmutableBytesPtr(tableKey);
+ // Lookup in cache if present.
+ Long lastDDLTimestamp = lastDDLTimestampMap.getIfPresent(tableKeyPtr);
+ if (lastDDLTimestamp != null) {
+ metricsSource.incrementRegionServerMetadataCacheHitCount();
+ LOGGER.trace(
+ "Retrieving last ddl timestamp value from cache for " + "schema: {}, " + "table: {}",
+ Bytes.toString(schemaName), Bytes.toString(tableName));
+ return lastDDLTimestamp;
}
-
- /**
- * Invalidate cache for the given tenantID, schema name and table name.
- * Guava cache is thread safe so we don't have to synchronize it explicitly.
- * @param tenantID tenantID
- * @param schemaName schemaName
- * @param tableName tableName
- */
- public void invalidate(byte[] tenantID, byte[] schemaName, byte[] tableName) {
- LOGGER.info("Invalidating server metadata cache for tenantID: {}, schema: {}, table: {}",
- Bytes.toString(tenantID), Bytes.toString(schemaName), Bytes.toString(tableName));
- byte[] tableKey = SchemaUtil.getTableKey(tenantID, schemaName, tableName);
- ImmutableBytesPtr tableKeyPtr = new ImmutableBytesPtr(tableKey);
- lastDDLTimestampMap.invalidate(tableKeyPtr);
+ metricsSource.incrementRegionServerMetadataCacheMissCount();
+ PTable table;
+ String tenantIDStr = Bytes.toString(tenantID);
+ if (tenantIDStr == null || tenantIDStr.isEmpty()) {
+ tenantIDStr = null;
}
-
- protected Connection getConnection(Properties properties) throws SQLException {
- return QueryUtil.getConnectionOnServer(properties, this.conf);
+ Properties properties = new Properties();
+ if (tenantIDStr != null) {
+ properties.setProperty(TENANT_ID_ATTRIB, tenantIDStr);
}
+ try (Connection connection = getConnection(properties)) {
+ // Using PhoenixConnection#getTableFromServerNoCache to completely bypass CQSI cache.
+ table =
+ connection.unwrap(PhoenixConnection.class).getTableFromServerNoCache(schemaName, tableName);
+ // TODO PhoenixConnection#getTableFromServerNoCache can throw TableNotFoundException.
+ // In that case, do we want to throw non retryable exception back to the client?
+ // Update cache with the latest DDL timestamp from SYSCAT server.
+ lastDDLTimestampMap.put(tableKeyPtr, table.getLastDDLTimestamp());
+ }
+ return table.getLastDDLTimestamp();
+ }
+
+ /**
+ * Invalidate cache for the given tenantID, schema name and table name. Guava cache is thread safe
+ * so we don't have to synchronize it explicitly.
+ * @param tenantID tenantID
+ * @param schemaName schemaName
+ * @param tableName tableName
+ */
+ public void invalidate(byte[] tenantID, byte[] schemaName, byte[] tableName) {
+ LOGGER.info("Invalidating server metadata cache for tenantID: {}, schema: {}, table: {}",
+ Bytes.toString(tenantID), Bytes.toString(schemaName), Bytes.toString(tableName));
+ byte[] tableKey = SchemaUtil.getTableKey(tenantID, schemaName, tableName);
+ ImmutableBytesPtr tableKeyPtr = new ImmutableBytesPtr(tableKey);
+ lastDDLTimestampMap.invalidate(tableKeyPtr);
+ }
+
+ protected Connection getConnection(Properties properties) throws SQLException {
+ return QueryUtil.getConnectionOnServer(properties, this.conf);
+ }
}
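A hedged sketch of the call pattern for the metadata cache above. In practice this runs inside region-server coprocessors against a working Phoenix/HBase setup; the schema and table names here are placeholders:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.cache.ServerMetadataCache;
import org.apache.phoenix.cache.ServerMetadataCacheImpl;

public class ServerMetadataCacheExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    ServerMetadataCache cache = ServerMetadataCacheImpl.getInstance(conf);

    byte[] tenantId = new byte[0]; // no tenant
    byte[] schema = Bytes.toBytes("MY_SCHEMA");
    byte[] table = Bytes.toBytes("MY_TABLE");

    // First call misses the cache and reads SYSTEM.CATALOG; later calls are served from memory.
    long ts = cache.getLastDDLTimestampForTable(tenantId, schema, table);
    System.out.println("last DDL timestamp = " + ts);

    // After a DDL on the table, invalidate so the next lookup refreshes from SYSCAT.
    cache.invalidate(tenantId, schema, table);
  }
}
```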
diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/cache/TenantCache.java b/phoenix-core-client/src/main/java/org/apache/phoenix/cache/TenantCache.java
index e36fd09a980..11ad61a74eb 100644
--- a/phoenix-core-client/src/main/java/org/apache/phoenix/cache/TenantCache.java
+++ b/phoenix-core-client/src/main/java/org/apache/phoenix/cache/TenantCache.java
@@ -7,7 +7,7 @@
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
- * http://www.apache.org/licenses/LICENSE-2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
@@ -25,18 +25,20 @@
import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
import org.apache.phoenix.memory.MemoryManager;
-
/**
- *
 * Interface to get and set cached values for a tenant
- *
- *
* @since 0.1
*/
public interface TenantCache {
- MemoryManager getMemoryManager();
- Closeable getServerCache(ImmutableBytesPtr cacheId);
- Closeable addServerCache(ImmutableBytesPtr cacheId, ImmutableBytesWritable cachePtr, byte[] txState, ServerCacheFactory cacheFactory, boolean useProtoForIndexMaintainer, boolean usePersistentCache, int clientVersion) throws SQLException;
- void removeServerCache(ImmutableBytesPtr cacheId);
- void removeAllServerCache();
+ MemoryManager getMemoryManager();
+
+ Closeable getServerCache(ImmutableBytesPtr cacheId);
+
+ Closeable addServerCache(ImmutableBytesPtr cacheId, ImmutableBytesWritable cachePtr,
+ byte[] txState, ServerCacheFactory cacheFactory, boolean useProtoForIndexMaintainer,
+ boolean usePersistentCache, int clientVersion) throws SQLException;
+
+ void removeServerCache(ImmutableBytesPtr cacheId);
+
+ void removeAllServerCache();
}
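
The reformatted `TenantCache` interface above describes a handle-based server cache: callers add a payload under a cache id, look it up later, and remove it when done. A toy in-memory analogue may make that shape easier to see; it swaps `ImmutableBytesPtr`/`ImmutableBytesWritable`, `ServerCacheFactory`, and `MemoryManager` for plain Java types, so it is a sketch only, not Phoenix code.

```java
import java.io.Closeable;
import java.io.IOException;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

// Illustrative analogue of TenantCache: String ids and byte[] payloads stand in for
// Phoenix's ImmutableBytesPtr / ImmutableBytesWritable; no memory accounting is done.
interface SimpleTenantCache {
  Closeable getServerCache(String cacheId);

  Closeable addServerCache(String cacheId, byte[] payload);

  void removeServerCache(String cacheId);

  void removeAllServerCache();
}

class InMemoryTenantCache implements SimpleTenantCache {
  private final Map<String, Closeable> caches = new ConcurrentHashMap<>();

  @Override
  public Closeable getServerCache(String cacheId) {
    return caches.get(cacheId);
  }

  @Override
  public Closeable addServerCache(String cacheId, byte[] payload) {
    // A real implementation builds the entry through a ServerCacheFactory and charges
    // its size against a MemoryManager chunk; here we only remember that it exists.
    Closeable entry = () -> { /* release resources held for this cache entry */ };
    caches.put(cacheId, entry);
    return entry;
  }

  @Override
  public void removeServerCache(String cacheId) {
    Closeable removed = caches.remove(cacheId);
    if (removed != null) {
      try {
        removed.close();
      } catch (IOException ignored) {
        // best-effort cleanup
      }
    }
  }

  @Override
  public void removeAllServerCache() {
    caches.keySet().forEach(this::removeServerCache);
  }
}
```

`TenantCacheImpl` below supplies what this sketch leaves out: per-tenant memory accounting through `MemoryManager` chunks, TTL-based expiry, and a separate persistent cache for entries that must outlive a single query.
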
diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/cache/TenantCacheImpl.java b/phoenix-core-client/src/main/java/org/apache/phoenix/cache/TenantCacheImpl.java
index 54afaf6c4d1..f2bb7238fed 100644
--- a/phoenix-core-client/src/main/java/org/apache/phoenix/cache/TenantCacheImpl.java
+++ b/phoenix-core-client/src/main/java/org/apache/phoenix/cache/TenantCacheImpl.java
@@ -7,7 +7,7 @@
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
- * http://www.apache.org/licenses/LICENSE-2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
@@ -30,258 +30,256 @@
import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
import org.apache.phoenix.memory.MemoryManager;
import org.apache.phoenix.memory.MemoryManager.MemoryChunk;
-import org.apache.phoenix.util.Closeables;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
import org.apache.phoenix.thirdparty.com.google.common.base.Ticker;
import org.apache.phoenix.thirdparty.com.google.common.cache.Cache;
import org.apache.phoenix.thirdparty.com.google.common.cache.CacheBuilder;
import org.apache.phoenix.thirdparty.com.google.common.cache.RemovalListener;
import org.apache.phoenix.thirdparty.com.google.common.cache.RemovalNotification;
+import org.apache.phoenix.util.Closeables;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
- *
- * Cache per tenant on server side. Tracks memory usage for each
- * tenat as well and rolling up usage to global memory manager.
- *
- *
+ * Cache per tenant on server side. Tracks memory usage for each tenant, rolling up usage to the
+ * global memory manager.
* @since 0.1
*/
public class TenantCacheImpl implements TenantCache {
- private static final Logger LOGGER = LoggerFactory.getLogger(TenantCacheImpl.class);
- private final int maxTimeToLiveMs;
- private final int maxPersistenceTimeToLiveMs;
- private final MemoryManager memoryManager;
- private final Ticker ticker;
-
- // Two caches exist: the "serverCaches" cache which is used for handling live
- // queries, and the "persistentServerCaches" cache which is used to store data
- // between queries. If we are out of memory, attempt to clear out entries from
- // the persistent cache before throwing an exception.
- private volatile Cache serverCaches;
- private volatile Cache persistentServerCaches;
-
- private final long EVICTION_MARGIN_BYTES = 10000000;
-
- private static class CacheEntry implements Comparable, Closeable {
- private ImmutableBytesPtr cacheId;
- private ImmutableBytesWritable cachePtr;
- private int hits;
- private int liveQueriesCount;
- private boolean usePersistentCache;
- private long size;
- private Closeable closeable;
-
- public CacheEntry(ImmutableBytesPtr cacheId, ImmutableBytesWritable cachePtr,
- ServerCacheFactory cacheFactory, byte[] txState, MemoryChunk chunk,
- boolean usePersistentCache, boolean useProtoForIndexMaintainer,
- int clientVersion) throws SQLException {
- this.cacheId = cacheId;
- this.cachePtr = cachePtr;
- this.size = cachePtr.getLength();
- this.hits = 0;
- this.liveQueriesCount = 0;
- this.usePersistentCache = usePersistentCache;
- this.closeable = cacheFactory.newCache(cachePtr, txState, chunk, useProtoForIndexMaintainer, clientVersion);
- }
-
- public void close() throws IOException {
- this.closeable.close();
- }
+ private static final Logger LOGGER = LoggerFactory.getLogger(TenantCacheImpl.class);
+ private final int maxTimeToLiveMs;
+ private final int maxPersistenceTimeToLiveMs;
+ private final MemoryManager memoryManager;
+ private final Ticker ticker;
- synchronized public void incrementLiveQueryCount() {
- liveQueriesCount++;
- hits++;
- }
+ // Two caches exist: the "serverCaches" cache which is used for handling live
+ // queries, and the "persistentServerCaches" cache which is used to store data
+ // between queries. If we are out of memory, attempt to clear out entries from
+ // the persistent cache before throwing an exception.
+ private volatile Cache serverCaches;
+ private volatile Cache persistentServerCaches;
- synchronized public void decrementLiveQueryCount() {
- liveQueriesCount--;
- }
+ private final long EVICTION_MARGIN_BYTES = 10000000;
- synchronized public boolean isLive() {
- return liveQueriesCount > 0;
- }
+ private static class CacheEntry implements Comparable, Closeable {
+ private ImmutableBytesPtr cacheId;
+ private ImmutableBytesWritable cachePtr;
+ private int hits;
+ private int liveQueriesCount;
+ private boolean usePersistentCache;
+ private long size;
+ private Closeable closeable;
- public boolean getUsePersistentCache() {
- return usePersistentCache;
- }
+ public CacheEntry(ImmutableBytesPtr cacheId, ImmutableBytesWritable cachePtr,
+ ServerCacheFactory cacheFactory, byte[] txState, MemoryChunk chunk,
+ boolean usePersistentCache, boolean useProtoForIndexMaintainer, int clientVersion)
+ throws SQLException {
+ this.cacheId = cacheId;
+ this.cachePtr = cachePtr;
+ this.size = cachePtr.getLength();
+ this.hits = 0;
+ this.liveQueriesCount = 0;
+ this.usePersistentCache = usePersistentCache;
+ this.closeable =
+ cacheFactory.newCache(cachePtr, txState, chunk, useProtoForIndexMaintainer, clientVersion);
+ }
- public ImmutableBytesPtr getCacheId() {
- return cacheId;
- }
+ public void close() throws IOException {
+ this.closeable.close();
+ }
- private Float rank() {
- return (float)hits;
- }
+ synchronized public void incrementLiveQueryCount() {
+ liveQueriesCount++;
+ hits++;
+ }
- @Override
- public int compareTo(CacheEntry o) {
- return rank().compareTo(o.rank());
- }
+ synchronized public void decrementLiveQueryCount() {
+ liveQueriesCount--;
}
- public TenantCacheImpl(MemoryManager memoryManager, int maxTimeToLiveMs, int maxPersistenceTimeToLiveMs) {
- this(memoryManager, maxTimeToLiveMs, maxPersistenceTimeToLiveMs, Ticker.systemTicker());
+ synchronized public boolean isLive() {
+ return liveQueriesCount > 0;
}
-
- public TenantCacheImpl(MemoryManager memoryManager, int maxTimeToLiveMs, int maxPersistenceTimeToLiveMs, Ticker ticker) {
- this.memoryManager = memoryManager;
- this.maxTimeToLiveMs = maxTimeToLiveMs;
- this.maxPersistenceTimeToLiveMs = maxPersistenceTimeToLiveMs;
- this.ticker = ticker;
+
+ public boolean getUsePersistentCache() {
+ return usePersistentCache;
}
-
- public Ticker getTicker() {
- return ticker;
+
+ public ImmutableBytesPtr getCacheId() {
+ return cacheId;
}
-
- // For testing
- public void cleanUp() {
- synchronized(this) {
- if (serverCaches != null) {
- serverCaches.cleanUp();
- }
- if (persistentServerCaches != null) {
- persistentServerCaches.cleanUp();
- }
- }
+
+ private Float rank() {
+ return (float) hits;
}
-
+
@Override
- public MemoryManager getMemoryManager() {
- return memoryManager;
+ public int compareTo(CacheEntry o) {
+ return rank().compareTo(o.rank());
}
+ }
+
+ public TenantCacheImpl(MemoryManager memoryManager, int maxTimeToLiveMs,
+ int maxPersistenceTimeToLiveMs) {
+ this(memoryManager, maxTimeToLiveMs, maxPersistenceTimeToLiveMs, Ticker.systemTicker());
+ }
+
+ public TenantCacheImpl(MemoryManager memoryManager, int maxTimeToLiveMs,
+ int maxPersistenceTimeToLiveMs, Ticker ticker) {
+ this.memoryManager = memoryManager;
+ this.maxTimeToLiveMs = maxTimeToLiveMs;
+ this.maxPersistenceTimeToLiveMs = maxPersistenceTimeToLiveMs;
+ this.ticker = ticker;
+ }
+
+ public Ticker getTicker() {
+ return ticker;
+ }
- private Cache getServerCaches() {
- /* Delay creation of this map until it's needed */
+ // For testing
+ public void cleanUp() {
+ synchronized (this) {
+ if (serverCaches != null) {
+ serverCaches.cleanUp();
+ }
+ if (persistentServerCaches != null) {
+ persistentServerCaches.cleanUp();
+ }
+ }
+ }
+
+ @Override
+ public MemoryManager getMemoryManager() {
+ return memoryManager;
+ }
+
+ private Cache getServerCaches() {
+ /* Delay creation of this map until it's needed */
+ if (serverCaches == null) {
+ synchronized (this) {
if (serverCaches == null) {
- synchronized(this) {
- if (serverCaches == null) {
- serverCaches = buildCache(maxTimeToLiveMs, false);
- }
- }
+ serverCaches = buildCache(maxTimeToLiveMs, false);
}
- return serverCaches;
+ }
}
+ return serverCaches;
+ }
- private Cache getPersistentServerCaches() {
- /* Delay creation of this map until it's needed */
+ private Cache getPersistentServerCaches() {
+ /* Delay creation of this map until it's needed */
+ if (persistentServerCaches == null) {
+ synchronized (this) {
if (persistentServerCaches == null) {
- synchronized(this) {
- if (persistentServerCaches == null) {
- persistentServerCaches = buildCache(maxPersistenceTimeToLiveMs, true);
- }
- }
+ persistentServerCaches = buildCache(maxPersistenceTimeToLiveMs, true);
}
- return persistentServerCaches;
+ }
}
+ return persistentServerCaches;
+ }
- private Cache buildCache(final int ttl, final boolean isPersistent) {
- CacheBuilder