diff --git a/hbase-hbck2/pom.xml b/hbase-hbck2/pom.xml
index 482fbda6a0..663a009027 100644
--- a/hbase-hbck2/pom.xml
+++ b/hbase-hbck2/pom.xml
@@ -150,6 +150,8 @@
<version>2.6</version>
<bannedImport>org.apache.hadoop.hbase.MetaTableAccessor</bannedImport>
+ <bannedImport>org.apache.hadoop.hbase.util.FSUtils</bannedImport>
+ <bannedImport>org.apache.hadoop.hbase.util.CommonFSUtils</bannedImport>
diff --git a/hbase-hbck2/src/main/java/org/apache/hbase/FileSystemFsck.java b/hbase-hbck2/src/main/java/org/apache/hbase/FileSystemFsck.java
index d062a1ddbc..7f34b996c8 100644
--- a/hbase-hbck2/src/main/java/org/apache/hbase/FileSystemFsck.java
+++ b/hbase-hbck2/src/main/java/org/apache/hbase/FileSystemFsck.java
@@ -26,8 +26,6 @@
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.util.CommonFSUtils;
-import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hbase.hbck1.HBaseFsck;
import org.apache.hbase.hbck1.HFileCorruptionChecker;
import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLine;
@@ -47,7 +45,7 @@ public class FileSystemFsck implements Closeable {
FileSystemFsck(Configuration conf) throws IOException {
this.configuration = conf;
- this.rootDir = CommonFSUtils.getRootDir(this.configuration);
+ this.rootDir = HBCKFsUtils.getRootDir(this.configuration);
this.fs = rootDir.getFileSystem(this.configuration);
}
@@ -82,8 +80,8 @@ int fsck(String[] args) throws IOException {
hbaseFsck.setHFileCorruptionChecker(hfcc);
Collection<String> tables = commandLine.getArgList();
Collection<Path> tableDirs = tables.isEmpty()?
- FSUtils.getTableDirs(this.fs, this.rootDir):
- tables.stream().map(t -> CommonFSUtils.getTableDir(this.rootDir, TableName.valueOf(t))).
+ HBCKFsUtils.getTableDirs(this.fs, this.rootDir):
+ tables.stream().map(t -> HBCKFsUtils.getTableDir(this.rootDir, TableName.valueOf(t))).
collect(Collectors.toList());
hfcc.checkTables(tableDirs);
hfcc.report(hbaseFsck.getErrors());
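
The empty-args fallback above mirrors how HBCK2 resolves table directories everywhere else in this patch. A minimal standalone sketch of the same resolution logic, assuming only the patched HBCKFsUtils class (the TableDirResolver name is illustrative, not part of this patch):

    import java.io.IOException;
    import java.util.Collection;
    import java.util.List;
    import java.util.stream.Collectors;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hbase.HBCKFsUtils;

    public final class TableDirResolver {
      private TableDirResolver() {}

      /** Resolves CLI table names to table directories; empty input means "all tables". */
      static Collection<Path> resolve(Configuration conf, List<String> tables) throws IOException {
        Path rootDir = HBCKFsUtils.getRootDir(conf);
        FileSystem fs = rootDir.getFileSystem(conf);
        return tables.isEmpty()
            // Scan every namespace directory under the hbase root dir.
            ? HBCKFsUtils.getTableDirs(fs, rootDir)
            // Otherwise map each name ("ns:table" or "table") to its directory.
            : tables.stream()
                .map(t -> HBCKFsUtils.getTableDir(rootDir, TableName.valueOf(t)))
                .collect(Collectors.toList());
      }
    }
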
diff --git a/hbase-hbck2/src/main/java/org/apache/hbase/HBCKFsUtils.java b/hbase-hbck2/src/main/java/org/apache/hbase/HBCKFsUtils.java
index c8785f8edb..38cefbfd0e 100644
--- a/hbase-hbck2/src/main/java/org/apache/hbase/HBCKFsUtils.java
+++ b/hbase-hbck2/src/main/java/org/apache/hbase/HBCKFsUtils.java
@@ -17,13 +17,37 @@
*/
package org.apache.hbase;
+import java.io.FileNotFoundException;
import java.io.IOException;
+import java.io.InterruptedIOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.List;
+import java.util.regex.Pattern;
+import edu.umd.cs.findbugs.annotations.CheckForNull;
+import org.apache.commons.lang3.ArrayUtils;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.PathFilter;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.io.HFileLink;
+import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
+import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.FSProtos;
+import org.apache.hadoop.hbase.util.AbstractFileStatusFilter;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.hbase.util.FileStatusFilter;
+import org.apache.hbase.thirdparty.com.google.common.collect.Iterators;
import org.apache.yetus.audience.InterfaceAudience;
@@ -34,6 +58,9 @@
@InterfaceAudience.Private
public final class HBCKFsUtils {
+
+ /** Full access permissions (starting point for a umask) */
+ public static final String FULL_RWX_PERMISSIONS = "777";
/**
* Private constructor to keep this class from being instantiated.
*/
@@ -85,4 +112,371 @@ public static Path getRootDir(final Configuration c) throws IOException {
return p.makeQualified(fs.getUri(), fs.getWorkingDirectory());
}
+
+ /**
+ * COPIED from CommonFSUtils.getCurrentFileSystem
+ *
+ * @param conf must not be null
+ * @return Returns the filesystem of the hbase rootdir.
+ * @throws IOException from underlying FileSystem
+ */
+ public static FileSystem getCurrentFileSystem(Configuration conf) throws IOException {
+ return getRootDir(conf).getFileSystem(conf);
+ }
+
+
+ /**
+ * COPIED from FSUtils.getTableDirs
+ */
+ public static List<Path> getTableDirs(final FileSystem fs, final Path rootdir)
+ throws IOException {
+ List<Path> tableDirs = new ArrayList<>();
+
+ for (FileStatus status : fs
+ .globStatus(new Path(rootdir, new Path(HConstants.BASE_NAMESPACE_DIR, "*")))) {
+ tableDirs.addAll(FSUtils.getLocalTableDirs(fs, status.getPath()));
+ }
+ return tableDirs;
+ }
+
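+ /**
+ * COPIED from CommonFSUtils.getTableName
+ *
+ * Returns the {@link TableName} of the table whose directory this is: the parent
+ * directory name is the namespace and the directory name is the table qualifier.
+ */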
+ public static TableName getTableName(Path tableDir) {
+ return TableName.valueOf(tableDir.getParent().getName(), tableDir.getName());
+ }
+
+ /**
+ * COPIED from FSUtils.
+ *
+ * Calls fs.listStatus() and treats FileNotFoundException as non-fatal.
+ * This accommodates differences between Hadoop versions: Hadoop 1 does not
+ * throw a FileNotFoundException but returns an empty FileStatus[], while
+ * Hadoop 2 throws FileNotFoundException.
+ *
+ * @param fs file system
+ * @param dir directory
+ * @param filter file status filter
+ * @return null if dir is empty or doesn't exist, otherwise FileStatus list
+ */
+ public static List<FileStatus> listStatusWithStatusFilter(final FileSystem fs,
+ final Path dir, final FileStatusFilter filter) throws IOException {
+ FileStatus [] status = null;
+ try {
+ status = fs.listStatus(dir);
+ } catch (FileNotFoundException fnfe) {
+ return null;
+ }
+
+ if (ArrayUtils.getLength(status) == 0) {
+ return null;
+ }
+
+ if (filter == null) {
+ return Arrays.asList(status);
+ } else {
+ List<FileStatus> status2 = filterFileStatuses(status, filter);
+ if (status2 == null || status2.isEmpty()) {
+ return null;
+ } else {
+ return status2;
+ }
+ }
+ }
+
+ /**
+ * COPIED from FSUtils.
+ *
+ * Sets version of file system
+ *
+ * @param fs filesystem object
+ * @param rootdir hbase root directory
+ * @param version version to set
+ * @param wait time to wait for retry
+ * @param retries number of times to retry before throwing an IOException
+ * @throws IOException e
+ */
+ public static void setVersion(FileSystem fs, Path rootdir, String version,
+ int wait, int retries) throws IOException {
+ Path versionFile = new Path(rootdir, HConstants.VERSION_FILE_NAME);
+ Path tempVersionFile = new Path(rootdir, HConstants.HBASE_TEMP_DIRECTORY + Path.SEPARATOR +
+ HConstants.VERSION_FILE_NAME);
+ while (true) {
+ try {
+ // Write the version to a temporary file
+ FSDataOutputStream s = fs.create(tempVersionFile);
+ try {
+ s.write(toVersionByteArray(version));
+ s.close();
+ s = null;
+ // Move the temp version file to its normal location. Returns false
+ // if the rename failed. Throw an IOE in that case.
+ if (!fs.rename(tempVersionFile, versionFile)) {
+ throw new IOException("Unable to move temp version file to " + versionFile);
+ }
+ } finally {
+ // Cleaning up the temporary if the rename failed would be trying
+ // too hard. We'll unconditionally create it again the next time
+ // through anyway, files are overwritten by default by create().
+
+ // Attempt to close the stream on the way out if it is still open.
+ try {
+ if (s != null) s.close();
+ } catch (IOException ignore) { }
+ }
+ return;
+ } catch (IOException e) {
+ if (retries > 0) {
+ fs.delete(versionFile, false);
+ try {
+ if (wait > 0) {
+ Thread.sleep(wait);
+ }
+ } catch (InterruptedException ie) {
+ throw (InterruptedIOException)new InterruptedIOException().initCause(ie);
+ }
+ retries--;
+ } else {
+ throw e;
+ }
+ }
+ }
+ }
+
+ /**
+ * COPIED from FSUtils.
+ *
+ * Get the file permissions specified in the configuration, if they are
+ * enabled.
+ *
+ * @param fs filesystem that the file will be created on.
+ * @param conf configuration to read for determining if permissions are
+ * enabled and which to use
+ * @param permissionConfKey property key in the configuration to use when
+ * finding the permission
+ * @return the permission to use when creating a new file on the fs. If
+ * special permissions are not specified in the configuration, then
+ * the default permissions on the fs will be returned.
+ */
+ public static FsPermission getFilePermissions(final FileSystem fs,
+ final Configuration conf, final String permissionConfKey) {
+ boolean enablePermissions = conf.getBoolean(
+ HConstants.ENABLE_DATA_FILE_UMASK, false);
+
+ if (enablePermissions) {
+ try {
+ FsPermission perm = new FsPermission(FULL_RWX_PERMISSIONS);
+ // make sure that we have a mask, if not, go default.
+ String mask = conf.get(permissionConfKey);
+ if (mask == null) {
+ return FsPermission.getFileDefault();
+ }
+ // apply the umask
+ FsPermission umask = new FsPermission(mask);
+ return perm.applyUMask(umask);
+ } catch (IllegalArgumentException e) {
+ return FsPermission.getFileDefault();
+ }
+ }
+ return FsPermission.getFileDefault();
+ }
+
+ /**
+ * COPIED from FSUtils.
+ *
+ * Returns all the reference file paths found in the given column family directory.
+ *
+ * @param fs the filesystem holding the family directory
+ * @param familyDir the column family directory to scan
+ * @return list of paths to reference files, empty if there are none
+ * @throws IOException if listing the directory fails
+ */
+ public static List<Path> getReferenceFilePaths(final FileSystem fs, final Path familyDir)
+ throws IOException {
+ List<FileStatus> fds = listStatusWithStatusFilter(fs, familyDir, new ReferenceFileFilter(fs));
+ if (fds == null) {
+ return Collections.emptyList();
+ }
+ List<Path> referenceFiles = new ArrayList<>(fds.size());
+ for (FileStatus fdfs: fds) {
+ Path fdPath = fdfs.getPath();
+ referenceFiles.add(fdPath);
+ }
+ return referenceFiles;
+ }
+
+
+ /**
+ * COPIED from FSUtils.
+ *
+ * Given a particular region dir, return all the familydirs inside it
+ *
+ * @param fs A file system for the Path
+ * @param regionDir Path to a specific region directory
+ * @return List of paths to valid family directories in region dir.
+ * @throws IOException
+ */
+ public static List<Path> getFamilyDirs(final FileSystem fs, final Path regionDir)
+ throws IOException {
+ // assumes we are in a region dir.
+ FileStatus[] fds = fs.listStatus(regionDir, new FamilyDirFilter(fs));
+ List<Path> familyDirs = new ArrayList<>(fds.length);
+ for (FileStatus fdfs: fds) {
+ Path fdPath = fdfs.getPath();
+ familyDirs.add(fdPath);
+ }
+ return familyDirs;
+ }
+
+ /**
+ * COPIED from FSUtils.
+ *
+ * Create the content to write into the ${HBASE_ROOTDIR}/hbase.version file.
+ * @param version Version to persist
+ * @return Serialized protobuf with version content and a bit of pb magic for a prefix.
+ */
+ static byte [] toVersionByteArray(final String version) {
+ FSProtos.HBaseVersionFileContent.Builder builder =
+ FSProtos.HBaseVersionFileContent.newBuilder();
+ return ProtobufUtil.prependPBMagic(builder.setVersion(version).build().toByteArray());
+ }
+
+ /**
+ * COPIED from FSUtils.
+ *
+ * Filter for all dirs that are legal region names.
+ */
+ public static class RegionDirFilter extends AbstractFileStatusFilter {
+ // This pattern will accept 0.90+ style hex region dirs and older numeric region dir names.
+ public static final Pattern regionDirPattern = Pattern.compile("^[0-9a-f]*$");
+ final FileSystem fs;
+
+ public RegionDirFilter(FileSystem fs) {
+ this.fs = fs;
+ }
+
+ @Override
+ protected boolean accept(Path p, @CheckForNull Boolean isDir) {
+ if (!regionDirPattern.matcher(p.getName()).matches()) {
+ return false;
+ }
+
+ try {
+ return isDirectory(fs, isDir, p);
+ } catch (IOException ioe) {
+ // Maybe the file was moved or the fs was disconnected.
+ return false;
+ }
+ }
+ }
+
+ /**
+ * COPIED from FSUtils.
+ *
+ * Filters FileStatuses in an array and returns a list
+ *
+ * @param input An array of FileStatuses
+ * @param filter A required filter to filter the array
+ * @return A list of FileStatuses
+ */
+ public static List<FileStatus> filterFileStatuses(FileStatus[] input,
+ FileStatusFilter filter) {
+ if (input == null) return null;
+ return filterFileStatuses(Iterators.forArray(input), filter);
+ }
+
+ /**
+ * COPIED from FSUtils.
+ *
+ * Filters FileStatuses in an iterator and returns a list
+ *
+ * @param input An iterator of FileStatuses
+ * @param filter A required filter to filter the iterator
+ * @return A list of FileStatuses
+ */
+ public static List<FileStatus> filterFileStatuses(Iterator<FileStatus> input,
+ FileStatusFilter filter) {
+ if (input == null) return null;
+ ArrayList<FileStatus> results = new ArrayList<>();
+ while (input.hasNext()) {
+ FileStatus f = input.next();
+ if (filter.accept(f)) {
+ results.add(f);
+ }
+ }
+ return results;
+ }
+
+ /**
+ * COPIED from FSUtils.
+ *
+ * A FileStatusFilter that accepts only reference files.
+ */
+ public static class ReferenceFileFilter extends AbstractFileStatusFilter {
+
+ private final FileSystem fs;
+
+ public ReferenceFileFilter(FileSystem fs) {
+ this.fs = fs;
+ }
+
+ @Override
+ protected boolean accept(Path p, @CheckForNull Boolean isDir) {
+ if (!StoreFileInfo.isReference(p)) {
+ return false;
+ }
+
+ try {
+ // only files can be references.
+ return isFile(fs, isDir, p);
+ } catch (IOException ioe) {
+ // Maybe the file was moved or the fs was disconnected.
+ return false;
+ }
+ }
+ }
+
+ /**
+ * COPIED from FSUtils.
+ *
+ * Filter for HFileLinks (StoreFiles and HFiles not included).
+ * The filter itself does not consider whether a link is a file or not.
+ */
+ public static class HFileLinkFilter implements PathFilter {
+
+ @Override
+ public boolean accept(Path p) {
+ return HFileLink.isHFileLink(p);
+ }
+ }
+
+ /**
+ * COPIED from FSUtils.
+ *
+ * Filter for all dirs that are legal column family names. This is generally used for colfam
+ * dirs <hbase.rootdir>/<tabledir>/<regiondir>/<colfamdir>.
+ */
+ public static class FamilyDirFilter extends AbstractFileStatusFilter {
+ final FileSystem fs;
+
+ public FamilyDirFilter(FileSystem fs) {
+ this.fs = fs;
+ }
+
+ @Override
+ protected boolean accept(Path p, @CheckForNull Boolean isDir) {
+ try {
+ // throws IAE if invalid
+ HColumnDescriptor.isLegalFamilyName(Bytes.toBytes(p.getName()));
+ } catch (IllegalArgumentException iae) {
+ // path name is an invalid family name and thus is excluded.
+ return false;
+ }
+
+ try {
+ return isDirectory(fs, isDir, p);
+ } catch (IOException ioe) {
+ // Maybe the file was moved or the fs was disconnected.
+ return false;
+ }
+ }
+ }
}
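
Taken together, the helpers copied above compose into the usual rootdir walk (table dir → region dir → family dir). A hedged sketch of that traversal against the patched class, assuming an hbase-site.xml with hbase.rootdir is on the classpath (the RootDirWalk name is illustrative):

    import java.io.IOException;
    import java.util.Collections;
    import java.util.List;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hbase.HBCKFsUtils;

    public final class RootDirWalk {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        FileSystem fs = HBCKFsUtils.getCurrentFileSystem(conf);
        Path rootDir = HBCKFsUtils.getRootDir(conf);
        for (Path tableDir : HBCKFsUtils.getTableDirs(fs, rootDir)) {
          System.out.println("table: " + HBCKFsUtils.getTableName(tableDir));
          // listStatusWithStatusFilter returns null (not an empty list) for empty/missing dirs.
          List<FileStatus> regions = HBCKFsUtils.listStatusWithStatusFilter(
              fs, tableDir, new HBCKFsUtils.RegionDirFilter(fs));
          for (FileStatus region : regions == null ? Collections.<FileStatus>emptyList() : regions) {
            for (Path familyDir : HBCKFsUtils.getFamilyDirs(fs, region.getPath())) {
              System.out.println("  family: " + familyDir.getName()
                  + " refs=" + HBCKFsUtils.getReferenceFilePaths(fs, familyDir).size());
            }
          }
        }
      }
    }
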
diff --git a/hbase-hbck2/src/main/java/org/apache/hbase/hbck1/HBaseFsck.java b/hbase-hbck2/src/main/java/org/apache/hbase/hbck1/HBaseFsck.java
index 6313b85680..9aae64462b 100644
--- a/hbase-hbck2/src/main/java/org/apache/hbase/hbck1/HBaseFsck.java
+++ b/hbase-hbck2/src/main/java/org/apache/hbase/hbck1/HBaseFsck.java
@@ -126,8 +126,18 @@
import org.apache.hadoop.hbase.replication.ReplicationQueueStorage;
import org.apache.hadoop.hbase.replication.ReplicationStorageFactory;
import org.apache.hadoop.hbase.security.UserProvider;
-import org.apache.hadoop.hbase.util.*;
+import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Bytes.ByteArrayComparator;
+import org.apache.hadoop.hbase.util.FSTableDescriptors;
+import org.apache.hadoop.hbase.util.HFileArchiveUtil;
+import org.apache.hadoop.hbase.util.KeyRange;
+import org.apache.hadoop.hbase.util.Pair;
+import org.apache.hadoop.hbase.util.PairOfSameType;
+import org.apache.hadoop.hbase.util.RegionSplitCalculator;
+import org.apache.hadoop.hbase.util.RetryCounter;
+import org.apache.hadoop.hbase.util.RetryCounterFactory;
+import org.apache.hadoop.hbase.util.Threads;
+import org.apache.hadoop.hbase.util.VersionInfo;
import org.apache.hadoop.hbase.zookeeper.ZKUtil;
import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
import org.apache.hadoop.hbase.zookeeper.ZNodePaths;
@@ -137,6 +147,7 @@
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.util.Tool;
+import org.apache.hbase.HBCKFsUtils;
import org.apache.hbase.HBCKMetaTableAccessor;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.yetus.audience.InterfaceStability;
@@ -421,7 +432,7 @@ private static RetryCounterFactory createZnodeRetryCounterFactory(Configuration
*/
@VisibleForTesting
public static Path getTmpDir(Configuration conf) throws IOException {
- return new Path(CommonFSUtils.getRootDir(conf), HConstants.HBASE_TEMP_DIRECTORY);
+ return new Path(HBCKFsUtils.getRootDir(conf), HConstants.HBASE_TEMP_DIRECTORY);
}
/**
@@ -453,8 +464,8 @@ Path getHbckLockPath() {
@Override
public FSDataOutputStream call() throws IOException {
try {
- FileSystem fs = CommonFSUtils.getCurrentFileSystem(this.conf);
- FsPermission defaultPerms = CommonFSUtils.getFilePermissions(fs, this.conf,
+ FileSystem fs = HBCKFsUtils.getCurrentFileSystem(this.conf);
+ FsPermission defaultPerms = HBCKFsUtils.getFilePermissions(fs, this.conf,
HConstants.DATA_FILE_UMASK_KEY);
Path tmpDir = getTmpDir(conf);
this.hbckLockPath = new Path(tmpDir, this.lockFileName);
@@ -482,7 +493,11 @@ private FSDataOutputStream createFileWithRetries(final FileSystem fs,
IOException exception = null;
do {
try {
- return CommonFSUtils.create(fs, hbckLockFilePath, defaultPerms, false);
+ return fs.create(hbckLockFilePath, defaultPerms, false,
+ fs.getConf().getInt("io.file.buffer.size", 4096),
+ fs.getDefaultReplication(hbckLockFilePath),
+ fs.getDefaultBlockSize(hbckLockFilePath),
+ null);
} catch (IOException ioe) {
LOG.info("Failed to create lock file " + hbckLockFilePath.getName()
+ ", try=" + (retryCounter.getAttemptTimes() + 1) + " of "
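
The inlined fs.create call above stands in for the removed CommonFSUtils.create, resolving the same defaults (buffer size from io.file.buffer.size, filesystem default replication and block size) before delegating to FileSystem.create. A hedged sketch of an equivalent local helper, should other call sites need it (the FsCreateHelper name is illustrative):

    import java.io.IOException;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.permission.FsPermission;

    final class FsCreateHelper {
      /** Creates path with explicit permissions, filling in the filesystem defaults. */
      static FSDataOutputStream createWithPerms(FileSystem fs, Path path, FsPermission perms,
          boolean overwrite) throws IOException {
        return fs.create(path, perms, overwrite,
            fs.getConf().getInt("io.file.buffer.size", 4096),  // Hadoop's default buffer size
            fs.getDefaultReplication(path),
            fs.getDefaultBlockSize(path),
            null);  // no Progressable
      }
    }
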
@@ -547,7 +562,7 @@ private void unlockHbck() {
do {
try {
IOUtils.closeQuietly(hbckOutFd);
- CommonFSUtils.delete(CommonFSUtils.getCurrentFileSystem(getConf()), hbckLockPath, true);
+ HBCKFsUtils.getCurrentFileSystem(getConf()).delete(hbckLockPath, true);
return;
} catch (IOException ioe) {
LOG.info("Failed to delete " + hbckLockPath + ", try="
@@ -932,9 +947,9 @@ public void checkRegionBoundaries() {
List<RegionInfo> regions = HBCKMetaTableAccessor.getAllRegions(connection);
final RegionBoundariesInformation currentRegionBoundariesInformation =
new RegionBoundariesInformation();
- Path hbaseRoot = CommonFSUtils.getRootDir(getConf());
+ Path hbaseRoot = HBCKFsUtils.getRootDir(getConf());
for (RegionInfo regionInfo : regions) {
- Path tableDir = CommonFSUtils.getTableDir(hbaseRoot, regionInfo.getTable());
+ Path tableDir = HBCKFsUtils.getTableDir(hbaseRoot, regionInfo.getTable());
currentRegionBoundariesInformation.regionName = regionInfo.getRegionName();
// For each region, get the start and stop key from the META and compare them to the
// same information from the Stores.
@@ -1175,10 +1190,10 @@ private int restoreHdfsIntegrity() throws IOException, InterruptedException {
private void offlineReferenceFileRepair() throws IOException, InterruptedException {
clearState();
Configuration conf = getConf();
- Path hbaseRoot = CommonFSUtils.getRootDir(conf);
+ Path hbaseRoot = HBCKFsUtils.getRootDir(conf);
FileSystem fs = hbaseRoot.getFileSystem(conf);
Map<String, Path> allFiles =
- getTableStoreFilePathMap(fs, hbaseRoot, new FSUtils.ReferenceFileFilter(fs), executor);
+ getTableStoreFilePathMap(fs, hbaseRoot, new HBCKFsUtils.ReferenceFileFilter(fs), executor);
for (Path path: allFiles.values()) {
Path referredToFile = StoreFileInfo.getReferredToFile(path);
if (fs.exists(referredToFile)) {
@@ -1237,10 +1252,10 @@ private void offlineReferenceFileRepair() throws IOException, InterruptedExcepti
* @return Map keyed by StoreFile name with a value of the full Path.
* @throws IOException When scanning the directory fails.
*/
// This and the next method are copied over from FSUtils so we can work against more versions
// of hbase. The signature of this method changed when this went in: HBASE-22721 Refactor
// HBaseFsck: move the inner class out. It went in across a strange set of versions. The method
// is also wonky because it is about in FSUtils but it takes an Interface from HBCK to do
// reporting (to print a '.' every so often). Its just wrong reaching across packages this way,
// especially when no relation.
private static Map<String, Path> getTableStoreFilePathMap(final FileSystem fs,
@@ -1252,8 +1267,8 @@ private static Map<String, Path> getTableStoreFilePathMap(final FileSystem fs,
// it was borrowed from it.
// only include the directory paths to tables
- for (Path tableDir : FSUtils.getTableDirs(fs, hbaseRootDir)) {
- getTableStoreFilePathMap(map, fs, hbaseRootDir, CommonFSUtils.getTableName(tableDir),
+ for (Path tableDir : HBCKFsUtils.getTableDirs(fs, hbaseRootDir)) {
+ getTableStoreFilePathMap(map, fs, hbaseRootDir, HBCKFsUtils.getTableName(tableDir),
sfFilter, executor);
}
return map;
@@ -1279,10 +1294,10 @@ private static Map<String, Path> getTableStoreFilePathMap(final FileSystem fs,
* @return Map keyed by StoreFile name with a value of the full Path.
* @throws IOException When scanning the directory fails.
*/
// Copied over from FSUtils so we can work against more versions of hbase. The signature
// of this method changed when this went in: HBASE-22721 Refactor HBaseFsck: move the inner
// class out. It went in across a strange set of versions. The method is also wonky because
// it is about in FSUtils but it takes an Interface from HBCK to do reporting (to print a '.'
// every so often). Its just wrong reaching across packages this way, especially when
// no relation.
private static Map<String, Path> getTableStoreFilePathMap(Map<String, Path> resultMap,
@@ -1294,15 +1309,15 @@ private static Map getTableStoreFilePathMap(Map resu
resultMap == null ? new ConcurrentHashMap<>(128, 0.75f, 32) : resultMap;
// only include the directory paths to tables
- Path tableDir = CommonFSUtils.getTableDir(hbaseRootDir, tableName);
+ Path tableDir = HBCKFsUtils.getTableDir(hbaseRootDir, tableName);
// Inside a table, there are compaction.dir directories to skip. Otherwise, all else
// should be regions.
- final FSUtils.FamilyDirFilter familyFilter = new FSUtils.FamilyDirFilter(fs);
+ final HBCKFsUtils.FamilyDirFilter familyFilter = new HBCKFsUtils.FamilyDirFilter(fs);
final Vector<Exception> exceptions = new Vector<>();
try {
List<FileStatus> regionDirs =
- FSUtils.listStatusWithStatusFilter(fs, tableDir, new FSUtils.RegionDirFilter(fs));
+ HBCKFsUtils.listStatusWithStatusFilter(fs, tableDir, new HBCKFsUtils.RegionDirFilter(fs));
if (regionDirs == null) {
return finalResultMap;
}
@@ -1322,7 +1337,7 @@ public void run() {
try {
HashMap<String, Path> regionStoreFileMap = new HashMap<>();
List<FileStatus> familyDirs =
- FSUtils.listStatusWithStatusFilter(fs, dd, familyFilter);
+ HBCKFsUtils.listStatusWithStatusFilter(fs, dd, familyFilter);
if (familyDirs == null) {
if (!fs.exists(dd)) {
LOG.warn("Skipping region because it no longer exists: " + dd);
@@ -1400,10 +1415,10 @@ public void run() {
*/
private void offlineHLinkFileRepair() throws IOException, InterruptedException {
Configuration conf = getConf();
- Path hbaseRoot = CommonFSUtils.getRootDir(conf);
+ Path hbaseRoot = HBCKFsUtils.getRootDir(conf);
FileSystem fs = hbaseRoot.getFileSystem(conf);
Map<String, Path> allFiles = getTableStoreFilePathMap(fs, hbaseRoot,
- new FSUtils.HFileLinkFilter(), executor);
+ new HBCKFsUtils.HFileLinkFilter(), executor);
for (Path path : allFiles.values()) {
// building HFileLink object to gather locations
HFileLink actualLink = HFileLink.buildFromHFileLinkPattern(conf, path);
@@ -1574,7 +1589,7 @@ private SortedMap<TableName, TableInfo> loadHdfsRegionInfos()
}
}
- Path hbaseRoot = CommonFSUtils.getRootDir(getConf());
+ Path hbaseRoot = HBCKFsUtils.getRootDir(getConf());
FileSystem fs = hbaseRoot.getFileSystem(getConf());
// serialized table info gathering.
for (HbckInfo hbi: hbckInfos) {
@@ -1631,7 +1646,7 @@ private SortedMap<TableName, TableInfo> loadHdfsRegionInfos()
private Set<String> getColumnFamilyList(Set<String> columns, HbckInfo hbi) throws IOException {
Path regionDir = hbi.getHdfsRegionDir();
FileSystem fs = regionDir.getFileSystem(getConf());
- FileStatus[] subDirs = fs.listStatus(regionDir, new FSUtils.FamilyDirFilter(fs));
+ FileStatus[] subDirs = fs.listStatus(regionDir, new HBCKFsUtils.FamilyDirFilter(fs));
for (FileStatus subdir : subDirs) {
String columnfamily = subdir.getPath().getName();
columns.add(columnfamily);
@@ -1915,7 +1930,7 @@ public boolean rebuildMeta() throws IOException, InterruptedException {
HBaseTestingUtility.closeRegionAndWAL(meta);
// Clean out the WAL we created and used here.
LOG.info("Deleting {}, result={}", waldir,
- CommonFSUtils.delete(FileSystem.get(getConf()), waldir, true));
+ HBCKFsUtils.getCurrentFileSystem(getConf()).delete(waldir, true));
}
LOG.info("Success! hbase:meta table rebuilt. Old hbase:meta moved into " + backupDir);
return true;
@@ -1927,7 +1942,7 @@ public boolean rebuildMeta() throws IOException, InterruptedException {
* @return an open hbase:meta HRegion
*/
private HRegion createNewMeta() throws IOException {
- Path rootdir = CommonFSUtils.getRootDir(getConf());
+ Path rootdir = HBCKFsUtils.getRootDir(getConf());
RegionInfo ri = RegionInfoBuilder.FIRST_META_REGIONINFO;
TableDescriptor td = new FSTableDescriptors(getConf()).get(TableName.META_TABLE_NAME);
return HBaseTestingUtility.createRegionAndWAL(ri, rootdir, getConf(), td);
@@ -1967,7 +1982,7 @@ private SortedMap<TableName, TableInfo> checkHdfsIntegrity(boolean fixHoles,
private Path getSidelineDir() throws IOException {
if (sidelineDir == null) {
- Path hbaseDir = CommonFSUtils.getRootDir(getConf());
+ Path hbaseDir = HBCKFsUtils.getRootDir(getConf());
Path hbckDir = new Path(hbaseDir, HConstants.HBCK_SIDELINEDIR_NAME);
sidelineDir = new Path(hbckDir, hbaseDir.getName() + "-"
+ startMillis);
@@ -2004,7 +2019,7 @@ Path sidelineRegionDir(FileSystem fs,
if (parentDir != null) {
rootDir = new Path(rootDir, parentDir);
}
- Path sidelineTableDir= CommonFSUtils.getTableDir(rootDir, tableName);
+ Path sidelineTableDir= HBCKFsUtils.getTableDir(rootDir, tableName);
Path sidelineRegionDir = new Path(sidelineTableDir, regionDir.getName());
fs.mkdirs(sidelineRegionDir);
boolean success = false;
@@ -2065,9 +2080,9 @@ Path sidelineRegionDir(FileSystem fs,
*/
void sidelineTable(FileSystem fs, TableName tableName, Path hbaseDir,
Path backupHbaseDir) throws IOException {
- Path tableDir = CommonFSUtils.getTableDir(hbaseDir, tableName);
+ Path tableDir = HBCKFsUtils.getTableDir(hbaseDir, tableName);
if (fs.exists(tableDir)) {
- Path backupTableDir= CommonFSUtils.getTableDir(backupHbaseDir, tableName);
+ Path backupTableDir= HBCKFsUtils.getTableDir(backupHbaseDir, tableName);
fs.mkdirs(backupTableDir.getParent());
boolean success = fs.rename(tableDir, backupTableDir);
if (!success) {
@@ -2084,7 +2099,7 @@ void sidelineTable(FileSystem fs, TableName tableName, Path hbaseDir,
*/
Path sidelineOldMeta() throws IOException {
// put current hbase:meta aside.
- Path hbaseDir = CommonFSUtils.getRootDir(getConf());
+ Path hbaseDir = HBCKFsUtils.getRootDir(getConf());
FileSystem fs = hbaseDir.getFileSystem(getConf());
Path backupDir = getSidelineDir();
fs.mkdirs(backupDir);
@@ -2134,10 +2149,11 @@ public static boolean versionFileExists(FileSystem fs, Path rootDir) throws IOEx
*/
public static void versionFileCreate(Configuration configuration, FileSystem fs, Path rootDir)
throws IOException {
- FSUtils.setVersion(fs, rootDir,
- configuration.getInt(HConstants.THREAD_WAKE_FREQUENCY, 10 * 1000),
- configuration.getInt(HConstants.VERSION_FILE_WRITE_ATTEMPTS,
- HConstants.DEFAULT_VERSION_FILE_WRITE_ATTEMPTS));
+ HBCKFsUtils.setVersion(fs, rootDir,
+ HConstants.FILE_SYSTEM_VERSION,
+ configuration.getInt(HConstants.THREAD_WAKE_FREQUENCY, 10 * 1000),
+ configuration.getInt(HConstants.VERSION_FILE_WRITE_ATTEMPTS,
+ HConstants.DEFAULT_VERSION_FILE_WRITE_ATTEMPTS));
}
/**
@@ -2145,15 +2161,15 @@ public static void versionFileCreate(Configuration configuration, FileSystem fs,
* regionInfoMap
*/
public void loadHdfsRegionDirs() throws IOException, InterruptedException {
- Path rootDir = CommonFSUtils.getRootDir(getConf());
+ Path rootDir = HBCKFsUtils.getRootDir(getConf());
FileSystem fs = rootDir.getFileSystem(getConf());
// List all tables from HDFS
List<FileStatus> tableDirs = Lists.newArrayList();
- List<Path> paths = FSUtils.getTableDirs(fs, rootDir);
+ List<Path> paths = HBCKFsUtils.getTableDirs(fs, rootDir);
for (Path path : paths) {
- TableName tableName = CommonFSUtils.getTableName(path);
+ TableName tableName = HBCKFsUtils.getTableName(path);
if ((!checkMetaOnly && isTableIncluded(tableName)) ||
tableName.equals(TableName.META_TABLE_NAME)) {
tableDirs.add(fs.getFileStatus(path));
@@ -2443,7 +2459,7 @@ private void preCheckPermission() throws IOException {
return;
}
- Path hbaseDir = CommonFSUtils.getRootDir(getConf());
+ Path hbaseDir = HBCKFsUtils.getRootDir(getConf());
FileSystem fs = hbaseDir.getFileSystem(getConf());
UserProvider userProvider = UserProvider.instantiate(getConf());
UserGroupInformation ugi = userProvider.getCurrent().getUGI();
@@ -2841,9 +2857,9 @@ private boolean deletedLeftoverSplitRegion(HbckInfo hbi) throws IOException {
}
Path regionDir = hbi.getHdfsRegionDir();
FileSystem fs = regionDir.getFileSystem(getConf());
- List<Path> familyDirs = FSUtils.getFamilyDirs(fs, regionDir);
+ List<Path> familyDirs = HBCKFsUtils.getFamilyDirs(fs, regionDir);
for (Path familyDir : familyDirs) {
- List<Path> referenceFilePaths = FSUtils.getReferenceFilePaths(fs, familyDir);
+ List<Path> referenceFilePaths = HBCKFsUtils.getReferenceFilePaths(fs, familyDir);
for (Path referenceFilePath : referenceFilePaths) {
Path parentRegionDir =
StoreFileInfo.getReferredToFile(referenceFilePath).getParent().getParent();
@@ -4258,7 +4274,7 @@ public TableName getTableName() {
// we are only guaranteed to have a path and not an HRI for hdfsEntry,
// so we get the name from the Path
Path tableDir = this.hdfsEntry.hdfsRegionDir.getParent();
- return CommonFSUtils.getTableName(tableDir);
+ return HBCKFsUtils.getTableName(tableDir);
} else {
// return the info from the first online/deployed hri
for (OnlineEntry e : deployedEntries) {
@@ -5423,13 +5439,13 @@ public HBaseFsck exec(ExecutorService exec, String[] args)
setHFileCorruptionChecker(hfcc); // so we can get result
Collection<TableName> tables = getIncludedTables();
Collection<Path> tableDirs = new ArrayList<>();
- Path rootdir = CommonFSUtils.getRootDir(getConf());
+ Path rootdir = HBCKFsUtils.getRootDir(getConf());
if (tables.size() > 0) {
for (TableName t : tables) {
- tableDirs.add(CommonFSUtils.getTableDir(rootdir, t));
+ tableDirs.add(HBCKFsUtils.getTableDir(rootdir, t));
}
} else {
- tableDirs = FSUtils.getTableDirs(CommonFSUtils.getCurrentFileSystem(getConf()), rootdir);
+ tableDirs = HBCKFsUtils.getTableDirs(HBCKFsUtils.getCurrentFileSystem(getConf()), rootdir);
}
hfcc.checkTables(tableDirs);
hfcc.report(errors);
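
The versionFileCreate change earlier in this file's diff is more than a rename: HBCKFsUtils.setVersion takes the version string explicitly, where the removed FSUtils overload filled in HConstants.FILE_SYSTEM_VERSION itself. A hedged sketch of the call as this patch wires it, with the retry parameters spelled out (the VersionFileExample name is illustrative):

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hbase.HBCKFsUtils;

    final class VersionFileExample {
      static void writeVersionFile(Configuration conf) throws IOException {
        Path rootDir = HBCKFsUtils.getRootDir(conf);
        FileSystem fs = rootDir.getFileSystem(conf);
        // Writes hbase.version under a temp name, then renames it into place,
        // retrying VERSION_FILE_WRITE_ATTEMPTS times with THREAD_WAKE_FREQUENCY waits.
        HBCKFsUtils.setVersion(fs, rootDir,
            HConstants.FILE_SYSTEM_VERSION,
            conf.getInt(HConstants.THREAD_WAKE_FREQUENCY, 10 * 1000),
            conf.getInt(HConstants.VERSION_FILE_WRITE_ATTEMPTS,
                HConstants.DEFAULT_VERSION_FILE_WRITE_ATTEMPTS));
      }
    }
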
diff --git a/hbase-hbck2/src/main/java/org/apache/hbase/hbck1/HBaseFsckRepair.java b/hbase-hbck2/src/main/java/org/apache/hbase/hbck1/HBaseFsckRepair.java
index b2cc879691..137f77d1e1 100644
--- a/hbase-hbck2/src/main/java/org/apache/hbase/hbck1/HBaseFsckRepair.java
+++ b/hbase-hbck2/src/main/java/org/apache/hbase/hbck1/HBaseFsckRepair.java
@@ -41,8 +41,7 @@
import org.apache.hadoop.hbase.master.RegionState;
import org.apache.hadoop.hbase.master.ServerManager;
import org.apache.hadoop.hbase.regionserver.HRegion;
-import org.apache.hadoop.hbase.util.CommonFSUtils;
-import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hbase.HBCKFsUtils;
import org.apache.hbase.HBCKMetaTableAccessor;
import org.apache.zookeeper.KeeperException;
import org.slf4j.Logger;
@@ -188,7 +187,7 @@ public static void fixMetaHoleOnlineAndAddReplicas(Configuration conf,
public static HRegion createHDFSRegionDir(Configuration conf,
RegionInfo hri, TableDescriptor htd) throws IOException {
// Create HRegion
- Path root = CommonFSUtils.getRootDir(conf);
+ Path root = HBCKFsUtils.getRootDir(conf);
HRegion region = HRegion.createHRegion(hri, root, conf, htd, null);
// Close the new region to flush to disk. Close log file too.
diff --git a/hbase-hbck2/src/main/java/org/apache/hbase/hbck1/HFileCorruptionChecker.java b/hbase-hbck2/src/main/java/org/apache/hbase/hbck1/HFileCorruptionChecker.java
index e93bae7535..384fbb7fdd 100644
--- a/hbase-hbck2/src/main/java/org/apache/hbase/hbck1/HFileCorruptionChecker.java
+++ b/hbase-hbck2/src/main/java/org/apache/hbase/hbck1/HFileCorruptionChecker.java
@@ -41,11 +41,10 @@
import org.apache.hadoop.hbase.io.hfile.CorruptHFileException;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.mob.MobUtils;
-import org.apache.hadoop.hbase.util.CommonFSUtils;
-import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.FSUtils.FamilyDirFilter;
import org.apache.hadoop.hbase.util.FSUtils.HFileFilter;
import org.apache.hadoop.hbase.util.FSUtils.RegionDirFilter;
+import org.apache.hbase.HBCKFsUtils;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
@@ -145,7 +144,7 @@ Path createQuarantinePath(Path hFile) throws IOException {
Path tableDir = regionDir.getParent();
// build up the corrupted dirs structure
- Path corruptBaseDir = new Path(CommonFSUtils.getRootDir(conf), HConstants.CORRUPT_DIR_NAME);
+ Path corruptBaseDir = new Path(HBCKFsUtils.getRootDir(conf), HConstants.CORRUPT_DIR_NAME);
if (conf.get("hbase.hfile.quarantine.dir") != null) {
LOG.warn("hbase.hfile.quarantine.dir is deprecated. Default to " + corruptBaseDir);
}
@@ -171,7 +170,7 @@ protected void checkColFamDir(Path cfDir) throws IOException {
return;
}
- List<FileStatus> hfs = FSUtils.filterFileStatuses(statuses, new HFileFilter(fs));
+ List<FileStatus> hfs = HBCKFsUtils.filterFileStatuses(statuses, new HFileFilter(fs));
// Hadoop 1.0 listStatus does not throw an exception if the path does not exist.
if (hfs.isEmpty() && !fs.exists(cfDir)) {
LOG.warn("Colfam Directory " + cfDir +
@@ -200,7 +199,7 @@ protected void checkMobColFamDir(Path cfDir) throws IOException {
return;
}
- List<FileStatus> hfs = FSUtils.filterFileStatuses(statuses, new HFileFilter(fs));
+ List<FileStatus> hfs = HBCKFsUtils.filterFileStatuses(statuses, new HFileFilter(fs));
// Hadoop 1.0 listStatus does not throw an exception if the path does not exist.
if (hfs.isEmpty() && !fs.exists(cfDir)) {
LOG.warn("Mob colfam Directory " + cfDir +
@@ -298,7 +297,7 @@ protected void checkRegionDir(Path regionDir) throws IOException {
return;
}
- List<FileStatus> cfs = FSUtils.filterFileStatuses(statuses, new FamilyDirFilter(fs));
+ List<FileStatus> cfs = HBCKFsUtils.filterFileStatuses(statuses, new FamilyDirFilter(fs));
// Hadoop 1.0 listStatus does not throw an exception if the path does not exist.
if (cfs.isEmpty() && !fs.exists(regionDir)) {
LOG.warn("Region Directory " + regionDir +
@@ -318,7 +317,7 @@ protected void checkRegionDir(Path regionDir) throws IOException {
*/
void checkTableDir(Path tableDir) throws IOException {
List<FileStatus> rds =
- FSUtils.listStatusWithStatusFilter(fs, tableDir, new RegionDirFilter(fs));
+ HBCKFsUtils.listStatusWithStatusFilter(fs, tableDir, new RegionDirFilter(fs));
if (rds == null) {
if (!fs.exists(tableDir)) {
LOG.warn("Table Directory " + tableDir +
@@ -419,7 +418,7 @@ public Void call() throws IOException {
* @return An instance of MobRegionDirChecker.
*/
private MobRegionDirChecker createMobRegionDirChecker(Path tableDir) {
- TableName tableName = CommonFSUtils.getTableName(tableDir);
+ TableName tableName = HBCKFsUtils.getTableName(tableDir);
Path mobDir = MobUtils.getMobRegionPath(conf, tableName);
return new MobRegionDirChecker(mobDir);
}
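
The checkColFamDir/checkMobColFamDir/checkRegionDir changes all follow one pattern: list a directory once, reuse the FileStatus array for the existence check, then filter it in memory with HBCKFsUtils.filterFileStatuses. A minimal sketch of that pattern in isolation (the FilterExample name is illustrative):

    import java.io.IOException;
    import java.util.List;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hbase.HBCKFsUtils;

    final class FilterExample {
      /** Lists a family dir once, then filters for reference files client-side. */
      static List<FileStatus> referenceFiles(FileSystem fs, Path familyDir) throws IOException {
        FileStatus[] statuses = fs.listStatus(familyDir);  // single listing, reused by the caller
        return HBCKFsUtils.filterFileStatuses(statuses, new HBCKFsUtils.ReferenceFileFilter(fs));
      }
    }
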
diff --git a/hbase-hbck2/src/main/java/org/apache/hbase/hbck1/OfflineMetaRepair.java b/hbase-hbck2/src/main/java/org/apache/hbase/hbck1/OfflineMetaRepair.java
index e2bce8c2c8..1b24b11ddf 100644
--- a/hbase-hbck2/src/main/java/org/apache/hbase/hbck1/OfflineMetaRepair.java
+++ b/hbase-hbck2/src/main/java/org/apache/hbase/hbck1/OfflineMetaRepair.java
@@ -22,10 +22,10 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.util.CommonFSUtils;
-import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.io.MultipleIOException;
+import org.apache.hbase.HBCKFsUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -71,7 +71,7 @@ public static void main(String[] args) throws Exception {
Configuration conf = HBaseConfiguration.create();
// Cover both bases, the old way of setting default fs and the new.
// We're supposed to run on 0.20 and 0.21 anyways.
- CommonFSUtils.setFsDefault(conf, CommonFSUtils.getRootDir(conf));
+ conf.set("fs.defaultFS", HBCKFsUtils.getRootDir(conf).toString());
HBaseFsck fsck = new HBaseFsck(conf);
// Process command-line args.
@@ -86,8 +86,8 @@ public static void main(String[] args) throws Exception {
}
// update hbase root dir to user-specified base
i++;
- CommonFSUtils.setRootDir(conf, new Path(args[i]));
- CommonFSUtils.setFsDefault(conf, CommonFSUtils.getRootDir(conf));
+ conf.set(HConstants.HBASE_DIR, new Path(args[i]).toString());
+ conf.set("fs.defaultFS", HBCKFsUtils.getRootDir(conf).toString());
} else if (cmd.equals("-sidelineDir")) {
if (i == args.length - 1) {
System.err.println("OfflineMetaRepair: -sidelineDir needs an HDFS path.");
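
The two conf.set calls above replace the removed CommonFSUtils.setRootDir and CommonFSUtils.setFsDefault, which were thin wrappers over exactly these keys. A hedged sketch of equivalent local helpers, in case other call sites need them (the ConfHelpers name is illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HConstants;

    final class ConfHelpers {
      /** Equivalent of the removed CommonFSUtils.setRootDir. */
      static void setRootDir(Configuration conf, Path root) {
        conf.set(HConstants.HBASE_DIR, root.toString());
      }

      /** Equivalent of the removed CommonFSUtils.setFsDefault. */
      static void setFsDefault(Configuration conf, Path root) {
        conf.set("fs.defaultFS", root.toString());  // for hadoop 0.21+
      }
    }
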
diff --git a/hbase-hbck2/src/test/java/org/apache/hbase/TestHBCK2.java b/hbase-hbck2/src/test/java/org/apache/hbase/TestHBCK2.java
index 1e8ba04686..deabf03731 100644
--- a/hbase-hbck2/src/test/java/org/apache/hbase/TestHBCK2.java
+++ b/hbase-hbck2/src/test/java/org/apache/hbase/TestHBCK2.java
@@ -51,8 +51,6 @@
import org.apache.hadoop.hbase.client.TableState;
import org.apache.hadoop.hbase.master.RegionState;
import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.CommonFSUtils;
-import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.Threads;
import org.junit.After;
@@ -410,7 +408,7 @@ private void deleteRegionInfo(Connection connection, RegionInfo region) {
private void deleteRegionDir(TableName tableName, String regionEncodedName) {
try {
- Path tableDir = CommonFSUtils.getTableDir(this.TEST_UTIL.getDataTestDirOnTestFS(), tableName);
+ Path tableDir = HBCKFsUtils.getTableDir(this.TEST_UTIL.getDataTestDirOnTestFS(), tableName);
Path regionPath = new Path(tableDir, regionEncodedName);
this.TEST_UTIL.getTestFileSystem().delete(regionPath, true);
} catch (IOException e) {