Skip to content

HBASE-29279 Allow throttling alter operation via table configuration #6951

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Open
wants to merge 2 commits into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -17,11 +17,6 @@
*/
package org.apache.hadoop.hbase.master.procedure;

import static org.apache.hadoop.hbase.master.procedure.ReopenTableRegionsProcedure.PROGRESSIVE_BATCH_BACKOFF_MILLIS_DEFAULT;
import static org.apache.hadoop.hbase.master.procedure.ReopenTableRegionsProcedure.PROGRESSIVE_BATCH_BACKOFF_MILLIS_KEY;
import static org.apache.hadoop.hbase.master.procedure.ReopenTableRegionsProcedure.PROGRESSIVE_BATCH_SIZE_MAX_DISABLED;
import static org.apache.hadoop.hbase.master.procedure.ReopenTableRegionsProcedure.PROGRESSIVE_BATCH_SIZE_MAX_KEY;

import java.io.IOException;
import java.util.Arrays;
import java.util.Collections;
Expand All @@ -31,7 +26,6 @@
import java.util.function.Supplier;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.ConcurrentTableModificationException;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HBaseIOException;
Expand Down Expand Up @@ -230,13 +224,8 @@ protected Flow executeFromState(final MasterProcedureEnv env, final ModifyTableS
break;
case MODIFY_TABLE_REOPEN_ALL_REGIONS:
if (isTableEnabled(env)) {
Configuration conf = env.getMasterConfiguration();
long backoffMillis = conf.getLong(PROGRESSIVE_BATCH_BACKOFF_MILLIS_KEY,
PROGRESSIVE_BATCH_BACKOFF_MILLIS_DEFAULT);
int batchSizeMax =
conf.getInt(PROGRESSIVE_BATCH_SIZE_MAX_KEY, PROGRESSIVE_BATCH_SIZE_MAX_DISABLED);
addChildProcedure(
new ReopenTableRegionsProcedure(getTableName(), backoffMillis, batchSizeMax));
addChildProcedure(ReopenTableRegionsProcedure.throttled(env.getMasterConfiguration(),
env.getMasterServices().getTableDescriptors().get(getTableName())));
}
setNextState(ModifyTableState.MODIFY_TABLE_ASSIGN_NEW_REPLICAS);
break;
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -22,9 +22,13 @@
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Optional;
import java.util.stream.Collectors;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.conf.ConfigKey;
import org.apache.hadoop.hbase.master.assignment.RegionStateNode;
import org.apache.hadoop.hbase.master.assignment.TransitRegionStateProcedure;
import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer;
Expand Down Expand Up @@ -55,12 +59,11 @@ public class ReopenTableRegionsProcedure
private static final Logger LOG = LoggerFactory.getLogger(ReopenTableRegionsProcedure.class);

public static final String PROGRESSIVE_BATCH_BACKOFF_MILLIS_KEY =
"hbase.reopen.table.regions.progressive.batch.backoff.ms";
ConfigKey.LONG("hbase.reopen.table.regions.progressive.batch.backoff.ms");
public static final long PROGRESSIVE_BATCH_BACKOFF_MILLIS_DEFAULT = 0L;
public static final String PROGRESSIVE_BATCH_SIZE_MAX_KEY =
"hbase.reopen.table.regions.progressive.batch.size.max";
ConfigKey.INT("hbase.reopen.table.regions.progressive.batch.size.max");
Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Type checks are now necessary.

public static final int PROGRESSIVE_BATCH_SIZE_MAX_DISABLED = -1;
private static final int PROGRESSIVE_BATCH_SIZE_MAX_DEFAULT_VALUE = Integer.MAX_VALUE;
Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This one was unused.


// this minimum prevents a max which would break this procedure
private static final int MINIMUM_BATCH_SIZE_MAX = 1;
Expand All @@ -83,6 +86,23 @@ public class ReopenTableRegionsProcedure
private long regionsReopened = 0;
private long batchesProcessed = 0;

/**
* Create a new ReopenTableRegionsProcedure respecting the throttling configuration for the table.
* First check the table descriptor, then fall back to the global configuration. Only used in
* ModifyTableProcedure.
*/
public static ReopenTableRegionsProcedure throttled(final Configuration conf,
final TableDescriptor desc) {
long backoffMillis = Optional.ofNullable(desc.getValue(PROGRESSIVE_BATCH_BACKOFF_MILLIS_KEY))
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Pity we do not return Optional for desc.getValue...

.map(Long::parseLong).orElseGet(() -> conf.getLong(PROGRESSIVE_BATCH_BACKOFF_MILLIS_KEY,
PROGRESSIVE_BATCH_BACKOFF_MILLIS_DEFAULT));
int batchSizeMax = Optional.ofNullable(desc.getValue(PROGRESSIVE_BATCH_SIZE_MAX_KEY))
.map(Integer::parseInt).orElseGet(
() -> conf.getInt(PROGRESSIVE_BATCH_SIZE_MAX_KEY, PROGRESSIVE_BATCH_SIZE_MAX_DISABLED));

return new ReopenTableRegionsProcedure(desc.getTableName(), backoffMillis, batchSizeMax);
}

/**
 * No-argument constructor; delegates to {@code this(null)}, i.e. no table name set.
 * NOTE(review): presumably required by the procedure framework so the procedure can be
 * instantiated reflectively before state deserialization — confirm against ProcedureV2
 * conventions.
 */
public ReopenTableRegionsProcedure() {
this(null);
}
Expand All @@ -96,12 +116,12 @@ public ReopenTableRegionsProcedure(final TableName tableName, final List<byte[]>
PROGRESSIVE_BATCH_SIZE_MAX_DISABLED);
}

public ReopenTableRegionsProcedure(final TableName tableName, long reopenBatchBackoffMillis,
ReopenTableRegionsProcedure(final TableName tableName, long reopenBatchBackoffMillis,
int reopenBatchSizeMax) {
this(tableName, Collections.emptyList(), reopenBatchBackoffMillis, reopenBatchSizeMax);
}

public ReopenTableRegionsProcedure(final TableName tableName, final List<byte[]> regionNames,
private ReopenTableRegionsProcedure(final TableName tableName, final List<byte[]> regionNames,
long reopenBatchBackoffMillis, int reopenBatchSizeMax) {
this.tableName = tableName;
this.regionNames = regionNames;
Expand Down Expand Up @@ -137,6 +157,12 @@ public long getBatchesProcessed() {
return batchesProcessed;
}

/**
 * Returns the backoff in milliseconds applied between region-reopen batches.
 * Test-only accessor: access is restricted to {@code src/test} paths via
 * {@code @RestrictedApi}.
 */
@RestrictedApi(explanation = "Should only be called in tests", link = "",
allowedOnPath = ".*/src/test/.*")
long getReopenBatchBackoffMillis() {
return reopenBatchBackoffMillis;
}

@RestrictedApi(explanation = "Should only be called internally or in tests", link = "",
allowedOnPath = ".*(/src/test/.*|ReopenTableRegionsProcedure).java")
protected int progressBatchSize() {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,9 @@
*/
package org.apache.hadoop.hbase.master.procedure;

import static org.apache.hadoop.hbase.master.procedure.ReopenTableRegionsProcedure.PROGRESSIVE_BATCH_BACKOFF_MILLIS_DEFAULT;
import static org.apache.hadoop.hbase.master.procedure.ReopenTableRegionsProcedure.PROGRESSIVE_BATCH_BACKOFF_MILLIS_KEY;
import static org.apache.hadoop.hbase.master.procedure.ReopenTableRegionsProcedure.PROGRESSIVE_BATCH_SIZE_MAX_KEY;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;

Expand All @@ -26,9 +29,11 @@
import java.util.stream.Collectors;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.master.RegionState.State;
import org.apache.hadoop.hbase.master.ServerManager;
import org.apache.hadoop.hbase.master.assignment.AssignmentManager;
Expand Down Expand Up @@ -160,6 +165,37 @@ public void testBatchSizeDoesNotOverflow() {
}
}

/**
 * Verifies that {@code ReopenTableRegionsProcedure.throttled} resolves its throttling
 * settings with the expected precedence: values on the table descriptor win over the
 * cluster {@code Configuration}, which in turn wins over the built-in defaults.
 */
@Test
public void testBackoffConfigurationFromTableDescriptor() {
  Configuration configuration = HBaseConfiguration.create();
  TableDescriptorBuilder descriptorBuilder = TableDescriptorBuilder.newBuilder(TABLE_NAME);

  // Nothing configured anywhere: defaults apply (zero backoff, effectively unlimited batch).
  ReopenTableRegionsProcedure procedure =
    ReopenTableRegionsProcedure.throttled(configuration, descriptorBuilder.build());
  assertEquals(PROGRESSIVE_BATCH_BACKOFF_MILLIS_DEFAULT, procedure.getReopenBatchBackoffMillis());
  assertEquals(Integer.MAX_VALUE, procedure.progressBatchSize());

  // Values only in the Configuration: 100ms backoff, batch size capped at 6.
  configuration.setLong(PROGRESSIVE_BATCH_BACKOFF_MILLIS_KEY, 100);
  configuration.setInt(PROGRESSIVE_BATCH_SIZE_MAX_KEY, 6);
  procedure = ReopenTableRegionsProcedure.throttled(configuration, descriptorBuilder.build());
  assertEquals(100, procedure.getReopenBatchBackoffMillis());
  // Expected progression doubles per call (2, 4, ...) and then sticks at the cap of 6.
  assertEquals(2, procedure.progressBatchSize());
  assertEquals(4, procedure.progressBatchSize());
  assertEquals(6, procedure.progressBatchSize());
  assertEquals(6, procedure.progressBatchSize());

  // Table descriptor overrides the Configuration: 200ms backoff, cap of 7.
  descriptorBuilder.setValue(PROGRESSIVE_BATCH_BACKOFF_MILLIS_KEY, "200");
  descriptorBuilder.setValue(PROGRESSIVE_BATCH_SIZE_MAX_KEY, "7");
  procedure = ReopenTableRegionsProcedure.throttled(configuration, descriptorBuilder.build());
  assertEquals(200, procedure.getReopenBatchBackoffMillis());
  assertEquals(2, procedure.progressBatchSize());
  assertEquals(4, procedure.progressBatchSize());
  assertEquals(7, procedure.progressBatchSize());
  assertEquals(7, procedure.progressBatchSize());
}

private void confirmBatchSize(int expectedBatchSize, Set<StuckRegion> stuckRegions,
ReopenTableRegionsProcedure proc) {
while (true) {
Expand Down