Skip to content

Commit

Permalink
HBASE-21959 - CompactionTool should close the store it uses for compacting files, in order to properly archive compacted files.
Browse files Browse the repository at this point in the history

Change-Id: If852529e79274a77eb08cac13936f02776232608
Signed-off-by: Xu Cang <xucang@apache.org>
  • Loading branch information
wchevreuil authored and xcangCRM committed Mar 21, 2019
1 parent f4de34a commit c1a64aa
Show file tree
Hide file tree
Showing 2 changed files with 102 additions and 0 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -171,6 +171,8 @@ private void compactStoreFiles(final Path tableDir, final HTableDescriptor htd,
}
}
} while (store.needsCompaction() && !compactOnce);
//We need to close the store properly, to make sure it will archive compacted files
store.close();
}

/**
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,100 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.regionserver;

import static org.junit.Assert.assertEquals;

import java.io.File;
import java.io.FileOutputStream;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.RegionServerTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.util.ToolRunner;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.junit.experimental.categories.Category;

@Category({ MediumTests.class, RegionServerTests.class })
public class TestCompactionTool {

  private final HBaseTestingUtility testUtil = new HBaseTestingUtility();

  private HRegion region;
  private final static byte[] qualifier = Bytes.toBytes("qf");
  private Path rootDir;
  private final TableName tableName = TableName.valueOf(getClass().getSimpleName());

  /**
   * Starts a mini cluster, creates the test table, and rewrites the test
   * classpath's hbase-site.xml so that its fs.defaultFS points at the mini
   * cluster's filesystem. CompactionTool builds its own Configuration from
   * the classpath, so without this rewrite it would not see the mini DFS.
   */
  @Before
  public void setUp() throws Exception {
    this.testUtil.startMiniCluster();
    testUtil.createTable(tableName, HBaseTestingUtility.fam1);
    String defaultFS = testUtil.getMiniHBaseCluster().getConfiguration().get("fs.defaultFS");
    Configuration config = HBaseConfiguration.create();
    config.set("fs.defaultFS", defaultFS);
    String configPath = this.getClass().getClassLoader()
      .getResource("hbase-site.xml").getFile();
    // try-with-resources: the original leaked this FileOutputStream,
    // leaving the rewritten hbase-site.xml handle open for the JVM's lifetime.
    try (FileOutputStream configOut = new FileOutputStream(new File(configPath))) {
      config.writeXml(configOut);
    }
    rootDir = testUtil.getDefaultRootDirPath();
    this.region = testUtil.getMiniHBaseCluster().getRegions(tableName).get(0);
  }

  @After
  public void tearDown() throws Exception {
    this.testUtil.shutdownMiniCluster();
    testUtil.cleanupTestDir();
  }

  /**
   * Flushes ten store files, runs CompactionTool with "-compactOnce -major"
   * against the store directory, and asserts that the nine compacted-away
   * files were archived, leaving a single resulting store file.
   */
  @Test
  public void testCompactedFilesArchived() throws Exception {
    for (int i = 0; i < 10; i++) {
      this.putAndFlush(i);
    }
    HStore store = (HStore) region.getStore(HBaseTestingUtility.fam1);
    assertEquals(10, store.getStorefilesCount());
    Path tableDir = FSUtils.getTableDir(rootDir, region.getRegionInfo().getTable());
    FileSystem fs = store.getFileSystem();
    String storePath = tableDir + "/" + region.getRegionInfo().getEncodedName() + "/"
      + Bytes.toString(HBaseTestingUtility.fam1);
    FileStatus[] regionDirFiles = fs.listStatus(new Path(storePath));
    assertEquals(10, regionDirFiles.length);
    int result = ToolRunner.run(HBaseConfiguration.create(), new CompactionTool(),
      new String[]{"-compactOnce", "-major", storePath});
    assertEquals(0, result);
    // Without the store.close() fix in CompactionTool, the 9 compacted files
    // would still sit in the store dir and this listing would show 10 entries.
    regionDirFiles = fs.listStatus(new Path(storePath));
    assertEquals(1, regionDirFiles.length);
  }

  // Writes a single cell keyed by {@code key} and forces a memstore flush,
  // so each call produces exactly one new store file.
  private void putAndFlush(int key) throws Exception {
    Put put = new Put(Bytes.toBytes(key));
    put.addColumn(HBaseTestingUtility.fam1, qualifier, Bytes.toBytes("val" + key));
    region.put(put);
    region.flush(true);
  }

}

0 comments on commit c1a64aa

Please sign in to comment.