
Commit bcca4f7

szucsvill and stoty committed

PHOENIX-7478 HBase 3 compatibility changes: Replace ClusterConnection with Connection API

Co-authored-by: Istvan Toth <[email protected]>

1 parent a51a57a, commit bcca4f7
File tree: 6 files changed, +64 -51 lines

phoenix-core-client/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java (+11 -6)

@@ -154,7 +154,6 @@
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Append;
 import org.apache.hadoop.hbase.client.CheckAndMutate;
-import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
 import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
 import org.apache.hadoop.hbase.client.Connection;
@@ -771,7 +770,13 @@ public ConnectionQueryServices getChildQueryServices(ImmutableBytesWritable tena

     @Override
     public void clearTableRegionCache(TableName tableName) throws SQLException {
-        ((ClusterConnection)connection).clearRegionCache(tableName);
+        try {
+            connection.getRegionLocator(tableName).clearRegionLocationCache();
+        } catch (IOException e) {
+            LOGGER.info("Exception while clearing table region cache", e);
+            //TODO allow passing cause to TableNotFoundException
+            throw new TableNotFoundException(tableName.toString());
+        }
     }

     public byte[] getNextRegionStartKey(HRegionLocation regionLocation, byte[] currentKey,
@@ -875,8 +880,7 @@ public List<HRegionLocation> getTableRegions(final byte[] tableName, final byte[
             currentKey = startRowKey;
             do {
                 HRegionLocation regionLocation =
-                    ((ClusterConnection) connection).getRegionLocation(table,
-                        currentKey, false);
+                    connection.getRegionLocator(table).getRegionLocation(currentKey, false);
                 currentKey =
                     getNextRegionStartKey(regionLocation, currentKey, prevRegionLocation);
                 locations.add(regionLocation);
@@ -2179,8 +2183,9 @@ private MetaDataMutationResult metaDataCoprocessorExec(String tableName, byte[]
         long startTime = EnvironmentEdgeManager.currentTimeMillis();
         while (true) {
             if (retried) {
-                ((ClusterConnection) connection).relocateRegion(
-                    SchemaUtil.getPhysicalName(systemTableName, this.getProps()), tableKey);
+                connection.getRegionLocator(SchemaUtil.getPhysicalName(
+                    systemTableName, this.getProps()))
+                    .getRegionLocation(tableKey, true);
             }

             Table ht = this.getTable(SchemaUtil.getPhysicalName(systemTableName, this.getProps()).getName());
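The replacement pattern throughout this file is the same: reach region locations through a RegionLocator obtained from the public Connection API instead of casting to the removed ClusterConnection. A minimal standalone sketch of that pattern, assuming a live cluster; the table name MY_TABLE and the row key are illustrative placeholders, not names from this commit:

import java.io.IOException;

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.util.Bytes;

public class RegionLocatorMigrationSketch {
    public static void main(String[] args) throws IOException {
        TableName table = TableName.valueOf("MY_TABLE"); // hypothetical table
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                RegionLocator locator = conn.getRegionLocator(table)) {
            // Was: ((ClusterConnection) conn).clearRegionCache(table);
            locator.clearRegionLocationCache();
            // Was: ((ClusterConnection) conn).getRegionLocation(table, row, false);
            HRegionLocation cached = locator.getRegionLocation(Bytes.toBytes("row-0"), false);
            // Was: ((ClusterConnection) conn).relocateRegion(table, row);
            // reload=true forces a fresh meta lookup, refreshing the cached entry.
            HRegionLocation fresh = locator.getRegionLocation(Bytes.toBytes("row-0"), true);
            System.out.println(cached + " / " + fresh);
        }
    }
}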

phoenix-core-client/src/main/java/org/apache/phoenix/schema/MetaDataClient.java (+27 -18)

@@ -186,7 +186,6 @@
 import org.apache.phoenix.thirdparty.com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
 import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
 import org.apache.hadoop.hbase.client.Delete;
@@ -6573,7 +6572,7 @@ public MutationState changePermissions(ChangePermsStatement changePermsStatement
         LOGGER.info(changePermsStatement.toString());

         try(Admin admin = connection.getQueryServices().getAdmin()) {
-            ClusterConnection clusterConnection = (ClusterConnection) admin.getConnection();
+            org.apache.hadoop.hbase.client.Connection hConnection = admin.getConnection();

             if (changePermsStatement.getSchemaName() != null) {
                 // SYSTEM.CATALOG doesn't have any entry for "default" HBase namespace, hence we will bypass the check
@@ -6583,7 +6582,7 @@ public MutationState changePermissions(ChangePermsStatement changePermsStatement
                         connection);
             }

-            changePermsOnSchema(clusterConnection, changePermsStatement);
+            changePermsOnSchema(hConnection, changePermsStatement);
         } else if (changePermsStatement.getTableName() != null) {
             PTable inputTable = connection.getTable(SchemaUtil.
                     normalizeFullTableName(changePermsStatement.getTableName().toString()));
@@ -6593,11 +6592,11 @@ public MutationState changePermissions(ChangePermsStatement changePermsStatement

             // Changing perms on base table and update the perms for global and view indexes
             // Views and local indexes are not physical tables and hence update perms is not needed
-            changePermsOnTables(clusterConnection, admin, changePermsStatement, inputTable);
+            changePermsOnTables(hConnection, admin, changePermsStatement, inputTable);
         } else {

             // User can be given perms at the global level
-            changePermsOnUser(clusterConnection, changePermsStatement);
+            changePermsOnUser(hConnection, changePermsStatement);
         }

         } catch (SQLException e) {
@@ -6612,20 +6611,25 @@ public MutationState changePermissions(ChangePermsStatement changePermsStatement
         return new MutationState(0, 0, connection);
     }

-    private void changePermsOnSchema(ClusterConnection clusterConnection, ChangePermsStatement changePermsStatement) throws Throwable {
+    private void changePermsOnSchema(org.apache.hadoop.hbase.client.Connection hConnection,
+            ChangePermsStatement changePermsStatement) throws Throwable {
         if (changePermsStatement.isGrantStatement()) {
-            AccessControlClient.grant(clusterConnection, changePermsStatement.getSchemaName(), changePermsStatement.getName(), changePermsStatement.getPermsList());
+            AccessControlClient.grant(hConnection, changePermsStatement.getSchemaName(),
+                    changePermsStatement.getName(), changePermsStatement.getPermsList());
         } else {
-            AccessControlClient.revoke(clusterConnection, changePermsStatement.getSchemaName(), changePermsStatement.getName(), Permission.Action.values());
+            AccessControlClient.revoke(hConnection, changePermsStatement.getSchemaName(),
+                    changePermsStatement.getName(), Permission.Action.values());
         }
     }

-    private void changePermsOnTables(ClusterConnection clusterConnection, Admin admin, ChangePermsStatement changePermsStatement, PTable inputTable) throws Throwable {
+    private void changePermsOnTables(org.apache.hadoop.hbase.client.Connection hConnection,
+            Admin admin, ChangePermsStatement changePermsStatement,
+            PTable inputTable) throws Throwable {

         org.apache.hadoop.hbase.TableName tableName = SchemaUtil.getPhysicalTableName
                 (inputTable.getPhysicalName().getBytes(), inputTable.isNamespaceMapped());

-        changePermsOnTable(clusterConnection, changePermsStatement, tableName);
+        changePermsOnTable(hConnection, changePermsStatement, tableName);

         boolean schemaInconsistency = false;
         List<PTable> inconsistentTables = null;
@@ -6646,7 +6650,7 @@ private void changePermsOnTables(ClusterConnection clusterConnection, Admin admi
             LOGGER.info("Updating permissions for Index Table: " +
                     indexTable.getName() + " Base Table: " + inputTable.getName());
             tableName = SchemaUtil.getPhysicalTableName(indexTable.getPhysicalName().getBytes(), indexTable.isNamespaceMapped());
-            changePermsOnTable(clusterConnection, changePermsStatement, tableName);
+            changePermsOnTable(hConnection, changePermsStatement, tableName);
         }

         if (schemaInconsistency) {
@@ -6664,7 +6668,7 @@ private void changePermsOnTables(ClusterConnection clusterConnection, Admin admi
         if (viewIndexTableExists) {
             LOGGER.info("Updating permissions for View Index Table: " +
                     Bytes.toString(viewIndexTableBytes) + " Base Table: " + inputTable.getName());
-            changePermsOnTable(clusterConnection, changePermsStatement, tableName);
+            changePermsOnTable(hConnection, changePermsStatement, tableName);
         } else {
             if (inputTable.isMultiTenant()) {
                 LOGGER.error("View Index Table not found for MultiTenant Table: " + inputTable.getName());
@@ -6675,23 +6679,28 @@ private void changePermsOnTables(ClusterConnection clusterConnection, Admin admi
         }
     }

-    private void changePermsOnTable(ClusterConnection clusterConnection, ChangePermsStatement changePermsStatement, org.apache.hadoop.hbase.TableName tableName)
+    private void changePermsOnTable(org.apache.hadoop.hbase.client.Connection hConnection,
+            ChangePermsStatement changePermsStatement,
+            org.apache.hadoop.hbase.TableName tableName)
             throws Throwable {
         if (changePermsStatement.isGrantStatement()) {
-            AccessControlClient.grant(clusterConnection, tableName, changePermsStatement.getName(),
+            AccessControlClient.grant(hConnection, tableName, changePermsStatement.getName(),
                     null, null, changePermsStatement.getPermsList());
         } else {
-            AccessControlClient.revoke(clusterConnection, tableName, changePermsStatement.getName(),
+            AccessControlClient.revoke(hConnection, tableName, changePermsStatement.getName(),
                     null, null, Permission.Action.values());
        }
     }

-    private void changePermsOnUser(ClusterConnection clusterConnection, ChangePermsStatement changePermsStatement)
+    private void changePermsOnUser(org.apache.hadoop.hbase.client.Connection hConnection,
+            ChangePermsStatement changePermsStatement)
             throws Throwable {
         if (changePermsStatement.isGrantStatement()) {
-            AccessControlClient.grant(clusterConnection, changePermsStatement.getName(), changePermsStatement.getPermsList());
+            AccessControlClient.grant(hConnection, changePermsStatement.getName(),
+                    changePermsStatement.getPermsList());
         } else {
-            AccessControlClient.revoke(clusterConnection, changePermsStatement.getName(), Permission.Action.values());
+            AccessControlClient.revoke(hConnection, changePermsStatement.getName(),
+                    Permission.Action.values());
         }
     }
 }
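Since AccessControlClient has overloads that accept the public Connection interface, the grant/revoke helpers above only needed their parameter type changed. For reference, a minimal sketch of the three call shapes used here (schema/namespace, table, and global), assuming a cluster with the AccessController coprocessor enabled; the user, schema, and table names are illustrative placeholders:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.Permission;

public class AccessControlSketch {
    public static void main(String[] args) throws Throwable {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create())) {
            // Schema-level grant (an HBase namespace).
            AccessControlClient.grant(conn, "my_schema", "alice", Permission.Action.READ);
            // Table-level grant; null family/qualifier means the whole table.
            AccessControlClient.grant(conn, TableName.valueOf("MY_TABLE"), "alice",
                null, null, Permission.Action.READ, Permission.Action.WRITE);
            // Global revoke of every action for the user.
            AccessControlClient.revoke(conn, "alice", Permission.Action.values());
        }
    }
}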

phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/MetaDataRegionObserver.java (+23 -21)

@@ -42,10 +42,11 @@
 import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.CoprocessorEnvironment;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionLocation;
-import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.client.ClusterConnection;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.Table;
@@ -56,12 +57,9 @@
 import org.apache.hadoop.hbase.filter.CompareFilter;
 import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
-import org.apache.hadoop.hbase.ipc.HBaseRpcController;
 import org.apache.hadoop.hbase.regionserver.RegionScanner;
 import org.apache.hadoop.hbase.security.User;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos;
 import org.apache.hadoop.hbase.util.Pair;
-import org.apache.hadoop.ipc.RemoteException;
 import org.apache.phoenix.cache.GlobalCache;
 import org.apache.phoenix.compile.MutationPlan;
 import org.apache.phoenix.compile.PostDDLCompiler;
@@ -677,22 +675,26 @@ public static PhoenixConnection getRebuildIndexConnection(Configuration config)
     }

     public static boolean tableRegionsOnline(Configuration conf, PTable table) {
-        try (ClusterConnection hcon =
-                (ClusterConnection) ConnectionFactory.createConnection(conf)) {
-            List<HRegionLocation> locations = hcon.locateRegions(
-                org.apache.hadoop.hbase.TableName.valueOf(table.getPhysicalName().getBytes()));
-
-            for (HRegionLocation loc : locations) {
+        try (Connection hcon = ConnectionFactory.createConnection(conf)) {
+            Admin admin = hcon.getAdmin();
+            List<RegionInfo> regionInfos = admin.getRegions(TableName.valueOf(
+                table.getPhysicalName().getBytes()));
+            // This makes Number of Regions RPC calls sequentially.
+            // For large tables this can be slow.
+            for (RegionInfo regionInfo : regionInfos) {
                 try {
-                    ServerName sn = loc.getServerName();
-                    if (sn == null) continue;
-
-                    AdminProtos.AdminService.BlockingInterface admin = hcon.getAdmin(sn);
-                    HBaseRpcController controller = hcon.getRpcControllerFactory().newController();
-                    org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil.getRegionInfo(controller,
-                        admin, loc.getRegion().getRegionName());
-                } catch (RemoteException e) {
-                    LOGGER.debug("Cannot get region " + loc.getRegion().getEncodedName() + " info due to error:" + e);
+                    // We don't actually care about the compaction state, we are only calling this
+                    // because this will trigger a call to the RS (from master), and we want to make
+                    // sure that all RSs are available
+                    // There are only a few methods in HBase 3.0 that are directly calling the RS,
+                    // this is one of them.
+                    admin.getCompactionStateForRegion(regionInfo.getRegionName());
+                    // This used to make a direct RPC call to the region, but HBase 3 makes that
+                    // very hard (needs reflection, or a bridge class in the same package),
+                    // and it's not necessary for checking the RS liveness
+                } catch (IOException e) {
+                    LOGGER.debug("Cannot get region " + regionInfo.getEncodedName()
+                        + " info due to error:" + e);
                     return false;
                 }
             }
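Per the comments in the diff above, the rewritten probe drops the old per-region admin RPC (which HBase 3 hides behind package-private plumbing) and instead calls Admin.getCompactionStateForRegion purely for its side effect: the call reaches the hosting RegionServer and fails if that server is down. A minimal sketch of the same check, assuming a reachable cluster; the table name is an illustrative placeholder:

import java.io.IOException;

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionInfo;

public class RegionsOnlineSketch {
    static boolean tableRegionsOnline(Connection conn, TableName table) {
        try (Admin admin = conn.getAdmin()) {
            // One RPC per region, issued sequentially; slow for large tables.
            for (RegionInfo region : admin.getRegions(table)) {
                // The returned compaction state is ignored; the call only
                // verifies that the region's server answers.
                admin.getCompactionStateForRegion(region.getRegionName());
            }
            return true;
        } catch (IOException e) {
            return false;
        }
    }

    public static void main(String[] args) throws IOException {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create())) {
            System.out.println(tableRegionsOnline(conn, TableName.valueOf("MY_TABLE")));
        }
    }
}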

phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALRecoveryRegionPostOpenIT.java (+1 -2)

@@ -41,7 +41,6 @@
 import org.apache.hadoop.hbase.MiniHBaseCluster;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.client.Put;
@@ -259,7 +258,7 @@ public void testRecoveryRegionPostOpen() throws Exception {
         scan = new Scan();
         primaryTable.close();
         primaryTable = hbaseConn.getTable(TableName.valueOf(DATA_TABLE_NAME));
-        ((ClusterConnection)hbaseConn).clearRegionLocationCache();
+        hbaseConn.clearRegionLocationCache();
         resultScanner = primaryTable.getScanner(scan);
         count = 0;
         for (Result result : resultScanner) {
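Note the scope difference between the two cache-clearing entry points this commit uses: Connection.clearRegionLocationCache() (here) drops cached locations for every table on the connection, while RegionLocator.clearRegionLocationCache() (in the two AggregateQuery diffs below) clears only one table's entries. A minimal sketch contrasting the two, with the table name "T" as an illustrative placeholder:

import java.io.IOException;

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class CacheClearScopeSketch {
    public static void main(String[] args) throws IOException {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create())) {
            // Connection-wide: every table's cached locations are dropped.
            conn.clearRegionLocationCache();
            // Table-scoped: only this table's cached locations are dropped.
            try (RegionLocator locator = conn.getRegionLocator(TableName.valueOf("T"))) {
                locator.clearRegionLocationCache();
            }
        }
    }
}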

phoenix-core/src/it/java/org/apache/phoenix/end2end/AggregateQueryIT.java (+1 -2)

@@ -38,7 +38,6 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.RegionLocator;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -108,8 +107,8 @@ public void testSplitWithCachedMeta() throws Exception {
         admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin();
         Configuration configuration = conn.unwrap(PhoenixConnection.class).getQueryServices().getConfiguration();
         org.apache.hadoop.hbase.client.Connection hbaseConn = ConnectionFactory.createConnection(configuration);
-        ((ClusterConnection)hbaseConn).clearRegionCache(TableName.valueOf(tableName));
         RegionLocator regionLocator = hbaseConn.getRegionLocator(TableName.valueOf(tableName));
+        regionLocator.clearRegionLocationCache();
         int nRegions = regionLocator.getAllRegionLocations().size();
         admin.split(tn, ByteUtil.concat(Bytes.toBytes(tenantId), Bytes.toBytes("00A3")));
         int retryCount = 0;

phoenix-core/src/it/java/org/apache/phoenix/end2end/AggregateQueryWithRegionMovesIT.java (+1 -2)

@@ -20,7 +20,6 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.RegionLocator;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -135,8 +134,8 @@ public void testSplitWithCachedMeta() throws Exception {
             conn.unwrap(PhoenixConnection.class).getQueryServices().getConfiguration();
         org.apache.hadoop.hbase.client.Connection hbaseConn =
             ConnectionFactory.createConnection(configuration);
-        ((ClusterConnection) hbaseConn).clearRegionCache(TableName.valueOf(tableName));
         RegionLocator regionLocator = hbaseConn.getRegionLocator(TableName.valueOf(tableName));
+        regionLocator.clearRegionLocationCache();
         int nRegions = regionLocator.getAllRegionLocations().size();
         admin.split(tn, ByteUtil.concat(Bytes.toBytes(tenantId), Bytes.toBytes("00A3")));
         int retryCount = 0;
