diff --git a/.gitignore b/.gitignore index fc93b1447ba1..274a0740c85e 100644 --- a/.gitignore +++ b/.gitignore @@ -25,6 +25,7 @@ linklint/ **/*.log tmp **/.flattened-pom.xml +.sw* .*.sw* ID filenametags diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/EncryptionUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/EncryptionUtil.java index b06ca9ce0d1e..05a1a4b0b66b 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/EncryptionUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/EncryptionUtil.java @@ -27,7 +27,6 @@ import org.apache.commons.crypto.cipher.CryptoCipherFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; import org.apache.hadoop.hbase.io.crypto.Cipher; import org.apache.hadoop.hbase.io.crypto.Encryption; import org.apache.hadoop.hbase.io.crypto.aes.CryptoAES; @@ -219,57 +218,6 @@ public static Key unwrapWALKey(Configuration conf, String subject, byte[] value) return getUnwrapKey(conf, subject, wrappedKey, cipher, null); } - /** - * Helper to create an encyption context. - * @param conf The current configuration. - * @param family The current column descriptor. - * @return The created encryption context. - * @throws IOException if an encryption key for the column cannot be unwrapped - * @throws IllegalStateException in case of encryption related configuration errors - */ - public static Encryption.Context createEncryptionContext(Configuration conf, - ColumnFamilyDescriptor family) throws IOException { - Encryption.Context cryptoContext = Encryption.Context.NONE; - String cipherName = family.getEncryptionType(); - if (cipherName != null) { - if (!Encryption.isEncryptionEnabled(conf)) { - throw new IllegalStateException("Encryption for family '" + family.getNameAsString() - + "' configured with type '" + cipherName + "' but the encryption feature is disabled"); - } - Cipher cipher; - Key key; - byte[] keyBytes = family.getEncryptionKey(); - if (keyBytes != null) { - // Family provides specific key material - key = unwrapKey(conf, keyBytes); - // Use the algorithm the key wants - cipher = Encryption.getCipher(conf, key.getAlgorithm()); - if (cipher == null) { - throw new IllegalStateException("Cipher '" + key.getAlgorithm() + "' is not available"); - } - // Fail if misconfigured - // We use the encryption type specified in the column schema as a sanity check on - // what the wrapped key is telling us - if (!cipher.getName().equalsIgnoreCase(cipherName)) { - throw new IllegalStateException( - "Encryption for family '" + family.getNameAsString() + "' configured with type '" - + cipherName + "' but key specifies algorithm '" + cipher.getName() + "'"); - } - } else { - // Family does not provide key material, create a random key - cipher = Encryption.getCipher(conf, cipherName); - if (cipher == null) { - throw new IllegalStateException("Cipher '" + cipherName + "' is not available"); - } - key = cipher.getRandomKey(); - } - cryptoContext = Encryption.newContext(conf); - cryptoContext.setCipher(cipher); - cryptoContext.setKey(key); - } - return cryptoContext; - } - /** * Helper for {@link #unwrapKey(Configuration, String, byte[])} which automatically uses the * configured master and alternative keys, rather than having to specify a key type to unwrap diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java 
b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java index b9dfa9afc5d8..2dca4f7e452d 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java @@ -343,6 +343,7 @@ public enum OperationStatusCode { /** Parameter name for HBase instance root directory */ public static final String HBASE_DIR = "hbase.rootdir"; + public static final String HBASE_ORIGINAL_DIR = "hbase.originalRootdir"; /** Parameter name for HBase client IPC pool type */ public static final String HBASE_CLIENT_IPC_POOL_TYPE = "hbase.client.ipc.pool.type"; @@ -1342,6 +1343,11 @@ public enum OperationStatusCode { "hbase.crypto.managed_keys.l1_active_cache.max_ns_entries"; public static final int CRYPTO_MANAGED_KEYS_L1_ACTIVE_CACHE_MAX_NS_ENTRIES_DEFAULT = 100; + /** Enables or disables local key generation per file. */ + public static final String CRYPTO_MANAGED_KEYS_LOCAL_KEY_GEN_PER_FILE_ENABLED_CONF_KEY = + "hbase.crypto.managed_keys.local_key_gen_per_file.enabled"; + public static final boolean CRYPTO_MANAGED_KEYS_LOCAL_KEY_GEN_PER_FILE_DEFAULT_ENABLED = false; + /** Configuration key for setting RPC codec class name */ public static final String RPC_CODEC_CONF_KEY = "hbase.client.rpc.codec"; diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Context.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Context.java index ce32351fecdf..7e816b917628 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Context.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Context.java @@ -34,6 +34,8 @@ public class Context implements Configurable { private Configuration conf; private Cipher cipher; private Key key; + private ManagedKeyData kekData; + private String keyNamespace; private String keyHash; Context(Configuration conf) { @@ -97,4 +99,22 @@ public Context setKey(Key key) { this.keyHash = new String(Hex.encodeHex(Encryption.computeCryptoKeyHash(conf, encoded))); return this; } + + public Context setKeyNamespace(String keyNamespace) { + this.keyNamespace = keyNamespace; + return this; + } + + public String getKeyNamespace() { + return keyNamespace; + } + + public Context setKEKData(ManagedKeyData kekData) { + this.kekData = kekData; + return this; + } + + public ManagedKeyData getKEKData() { + return kekData; + } } diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Encryption.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Encryption.java index a176a4329422..91af77361a0e 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Encryption.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Encryption.java @@ -117,12 +117,6 @@ public Context setCipher(Cipher cipher) { return this; } - @Override - public Context setKey(Key key) { - super.setKey(key); - return this; - } - public Context setKey(byte[] key) { super.setKey(new SecretKeySpec(key, getCipher().getName())); return this; diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/ManagedKeyData.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/ManagedKeyData.java index e9c00935d38e..ffd5dbb7b574 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/ManagedKeyData.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/ManagedKeyData.java @@ -54,11 +54,17 @@ public class ManagedKeyData { */ public static final String KEY_SPACE_GLOBAL = "*"; + 
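Reviewer note on the `Context` changes above: the two new fields let an encryption context carry not just the data key but also where the key belongs (`keyNamespace`) and which key-encryption key (KEK) wrapped it (`kekData`). A minimal wiring sketch, assuming the caller already holds a per-file data key and a `ManagedKeyData` for the KEK (the helper class and variable names are hypothetical):

```java
import java.security.Key;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.io.crypto.Encryption;
import org.apache.hadoop.hbase.io.crypto.ManagedKeyData;

public class ContextWiringSketch {
  static Encryption.Context buildContext(Configuration conf, Key dek,
      ManagedKeyData kekData, String keyNamespace) {
    Encryption.Context ctx = Encryption.newContext(conf);
    ctx.setCipher(Encryption.getCipher(conf, dek.getAlgorithm()));
    ctx.setKey(dek); // per-file data encryption key
    // New in this patch: record the key's namespace and the wrapping KEK so
    // the HFile writer can persist both in the trailer (see HFileWriterImpl).
    ctx.setKeyNamespace(keyNamespace);
    ctx.setKEKData(kekData);
    return ctx;
  }
}
```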
/** + * Special value to be used for the custodian to indicate that it is global, meaning it is not + * associated with a specific custodian. + */ + public static final byte[] KEY_GLOBAL_CUSTODIAN_BYTES = KEY_SPACE_GLOBAL.getBytes(); + /** * Encoded form of global custodian. */ public static final String KEY_GLOBAL_CUSTODIAN = - ManagedKeyProvider.encodeToStr(KEY_SPACE_GLOBAL.getBytes()); + ManagedKeyProvider.encodeToStr(KEY_GLOBAL_CUSTODIAN_BYTES); private final byte[] keyCustodian; private final String keyNamespace; diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/MockAesKeyProvider.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/MockAesKeyProvider.java index 0bb2aef7d99b..39f460e062ae 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/MockAesKeyProvider.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/MockAesKeyProvider.java @@ -18,6 +18,8 @@ package org.apache.hadoop.hbase.io.crypto; import java.security.Key; +import java.util.HashMap; +import java.util.Map; import javax.crypto.spec.SecretKeySpec; import org.apache.yetus.audience.InterfaceAudience; @@ -27,8 +29,13 @@ @InterfaceAudience.Private public class MockAesKeyProvider implements KeyProvider { + private Map<String, Key> keys = new HashMap<>(); + + private boolean cacheKeys = false; + @Override public void init(String parameters) { + cacheKeys = Boolean.parseBoolean(parameters); } @Override @@ -40,7 +47,14 @@ public Key getKey(String name) { public Key[] getKeys(String[] aliases) { Key[] result = new Key[aliases.length]; for (int i = 0; i < aliases.length; i++) { - result[i] = new SecretKeySpec(Encryption.hash128(aliases[i]), "AES"); + if (keys.containsKey(aliases[i])) { + result[i] = keys.get(aliases[i]); + } else { + result[i] = new SecretKeySpec(Encryption.hash128(aliases[i]), "AES"); + if (cacheKeys) { + keys.put(aliases[i], result[i]); + } + } } return result; } diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java index da4662d2c8a0..d79cb6f38873 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java @@ -288,17 +288,48 @@ public static String getPath(Path p) { * @throws IOException e */ public static Path getRootDir(final Configuration c) throws IOException { - Path p = new Path(c.get(HConstants.HBASE_DIR)); + return getRootDir(c, HConstants.HBASE_DIR); + } + + /** + * Get the path for the original root data directory, which could be different from the current + * root directory, in case it was changed. + * @param c configuration + * @return {@link Path} to hbase original root directory from configuration as a qualified Path. + * @throws IOException e + */ + public static Path getOriginalRootDir(final Configuration c) throws IOException { + return getRootDir(c, + c.get(HConstants.HBASE_ORIGINAL_DIR) == null + ? HConstants.HBASE_DIR + : HConstants.HBASE_ORIGINAL_DIR); + } + + /** + * Get the path for the root data directory. + * @param c configuration + * @param rootDirProp the property name for the root directory + * @return {@link Path} to hbase root directory from configuration as a qualified Path. + * @throws IOException e + */ + public static Path getRootDir(final Configuration c, final String rootDirProp) + throws IOException { + Path p = new Path(c.get(rootDirProp)); FileSystem fs = p.getFileSystem(c); return p.makeQualified(fs.getUri(), fs.getWorkingDirectory()); } public static void setRootDir(final Configuration c, final Path root) { + // Keep track of the original root dir. + if (c.get(HConstants.HBASE_ORIGINAL_DIR) == null && c.get(HConstants.HBASE_DIR) != null) { + c.set(HConstants.HBASE_ORIGINAL_DIR, c.get(HConstants.HBASE_DIR)); + } c.set(HConstants.HBASE_DIR, root.toString()); } public static Path getSystemKeyDir(final Configuration c) throws IOException { - return new Path(getRootDir(c), HConstants.SYSTEM_KEYS_DIRECTORY); + // Always use the original root dir for system key dir, in case it was changed. + return new Path(getOriginalRootDir(c), HConstants.SYSTEM_KEYS_DIRECTORY); } public static void setFsDefault(final Configuration c, final Path root) {
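The `CommonFSUtils` change above is subtle: `setRootDir` now snapshots the previous `hbase.rootdir` into `hbase.originalRootdir`, and `getSystemKeyDir` resolves against that original root, so system keys stay findable after the root is re-pointed (e.g. at a restored snapshot). A runnable sketch of the observable behavior (the local paths are illustrative only):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.util.CommonFSUtils;

public class OriginalRootDirSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.set(HConstants.HBASE_DIR, "file:///tmp/hbase");
    // Re-pointing the root dir now records the previous value under
    // hbase.originalRootdir (only the first call captures it).
    CommonFSUtils.setRootDir(conf, new Path("file:///tmp/hbase-restore"));
    System.out.println(CommonFSUtils.getRootDir(conf));         // .../hbase-restore
    System.out.println(CommonFSUtils.getOriginalRootDir(conf)); // .../hbase
    // getSystemKeyDir() therefore stays anchored at the original root.
    System.out.println(CommonFSUtils.getSystemKeyDir(conf));
  }
}
```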
diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/KeymetaTestUtils.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/KeymetaTestUtils.java new file mode 100644 index 000000000000..3a8fb3d32464 --- /dev/null +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/KeymetaTestUtils.java @@ -0,0 +1,198 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.io.crypto; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import java.io.ByteArrayInputStream; +import java.io.File; +import java.io.FileOutputStream; +import java.io.IOException; +import java.net.URLEncoder; +import java.security.KeyStore; +import java.security.MessageDigest; +import java.util.Base64; +import java.util.Map; +import java.util.Properties; +import java.util.function.Function; +import javax.crypto.spec.SecretKeySpec; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.PositionedReadable; +import org.apache.hadoop.fs.Seekable; +import org.apache.hadoop.hbase.HBaseCommonTestingUtil; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.util.Bytes; + +import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; + +public class KeymetaTestUtils { + + /** + * A ByteArrayInputStream that implements Seekable and PositionedReadable to work with + * FSDataInputStream. + */ + public static class SeekableByteArrayInputStream extends ByteArrayInputStream + implements Seekable, PositionedReadable { + + public SeekableByteArrayInputStream(byte[] buf) { + super(buf); + } + + @Override + public void seek(long pos) throws IOException { + if (pos < this.mark || pos > buf.length) { + throw new IOException("Seek position out of bounds: " + pos); + } + this.pos = (int) pos; + this.mark = (int) pos; + } + + @Override + public long getPos() throws IOException { + return pos; + } + + @Override + public boolean seekToNewSource(long targetPos) throws IOException { + return false; // No alternate sources + } + + @Override + public int read(long position, byte[] buffer, int offset, int length) throws IOException { + if (position < 0 || position >= buf.length) { + return -1; + } + int currentPos = pos; + seek(position); + int bytesRead = read(buffer, offset, length); + pos = currentPos; // Restore original position + return bytesRead; + } + + @Override + public void readFully(long position, byte[] buffer, int offset, int length) throws IOException { + int totalBytesRead = 0; + while (totalBytesRead < length) { + int bytesRead = + read(position + totalBytesRead, buffer, offset + totalBytesRead, length - totalBytesRead); + if (bytesRead == -1) { + throw new IOException("Reached end of stream before reading fully"); + } + totalBytesRead += bytesRead; + } + } + + @Override + public void readFully(long position, byte[] buffer) throws IOException { + readFully(position, buffer, 0, buffer.length); + } + }
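For context on the helper class above: Hadoop's `FSDataInputStream` refuses to wrap a plain `InputStream` — the wrapped stream must implement `Seekable` and `PositionedReadable`, which is exactly what this test class supplies for in-memory byte arrays. A small usage sketch:

```java
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.hbase.io.crypto.KeymetaTestUtils;

public class SeekableStreamSketch {
  public static void main(String[] args) throws Exception {
    byte[] data = "hello hfile".getBytes(StandardCharsets.UTF_8);
    // FSDataInputStream requires Seekable + PositionedReadable on the wrapped
    // stream; the helper class makes a byte[] usable as a mock file.
    FSDataInputStream in = new FSDataInputStream(
      new KeymetaTestUtils.SeekableByteArrayInputStream(data));
    byte[] buf = new byte[5];
    in.readFully(6, buf); // positioned read; does not move the stream cursor
    System.out.println(new String(buf, StandardCharsets.UTF_8)); // "hfile"
    in.close();
  }
}
```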
+ + private KeymetaTestUtils() { + // Utility class + } + + public static final String ALIAS = "test"; + public static final String PASSWORD = "password"; + + public static void addEntry(Configuration conf, int keyLen, KeyStore store, String alias, + String custodian, boolean withPasswordOnAlias, Map<Bytes, Bytes> cust2key, + Map<Bytes, String> cust2alias, Properties passwordFileProps) throws Exception { + Preconditions.checkArgument(keyLen == 256 || keyLen == 128, "Key length must be 256 or 128"); + byte[] key = + MessageDigest.getInstance(keyLen == 256 ? "SHA-256" : "MD5").digest(Bytes.toBytes(alias)); + cust2alias.put(new Bytes(custodian.getBytes()), alias); + cust2key.put(new Bytes(custodian.getBytes()), new Bytes(key)); + store.setEntry(alias, new KeyStore.SecretKeyEntry(new SecretKeySpec(key, "AES")), + new KeyStore.PasswordProtection(withPasswordOnAlias ? PASSWORD.toCharArray() : new char[0])); + String encCust = Base64.getEncoder().encodeToString(custodian.getBytes()); + String confKey = HConstants.CRYPTO_MANAGED_KEY_STORE_CONF_KEY_PREFIX + encCust + "." + "alias"; + conf.set(confKey, alias); + if (passwordFileProps != null) { + passwordFileProps.setProperty(alias, PASSWORD); + } + }
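Usage sketch for `addEntry` (the custodian name, alias, and map variables below are hypothetical test fixtures): it derives a deterministic key from the alias, stores it in the keystore, registers the alias under the Base64-encoded custodian in the configuration, and optionally records the alias password:

```java
import java.security.KeyStore;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.io.crypto.KeymetaTestUtils;
import org.apache.hadoop.hbase.util.Bytes;

public class AddEntrySketch {
  static void populate() throws Exception {
    Configuration conf = HBaseConfiguration.create();
    KeyStore store = KeyStore.getInstance("JCEKS");
    store.load(null, KeymetaTestUtils.PASSWORD.toCharArray());
    Map<Bytes, Bytes> cust2key = new HashMap<>();
    Map<Bytes, String> cust2alias = new HashMap<>();
    Properties passwords = new Properties();
    // Registers a 256-bit key for custodian "tenantA" under alias
    // "tenantA-alias", wires the alias into conf, and records its password.
    KeymetaTestUtils.addEntry(conf, 256, store, "tenantA-alias", "tenantA",
      true, cust2key, cust2alias, passwords);
  }
}
```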
+ + public static String setupTestKeyStore(HBaseCommonTestingUtil testUtil, + boolean withPasswordOnAlias, boolean withPasswordFile, + Function<KeyStore, Properties> customEntriesAdder) throws Exception { + KeyStore store = KeyStore.getInstance("JCEKS"); + store.load(null, PASSWORD.toCharArray()); + Properties passwordProps = null; + if (customEntriesAdder != null) { + passwordProps = customEntriesAdder.apply(store); + } + // Create the test directory + String dataDir = testUtil.getDataTestDir().toString(); + new File(dataDir).mkdirs(); + // Write the keystore file + File storeFile = new File(dataDir, "keystore.jks"); + FileOutputStream os = new FileOutputStream(storeFile); + try { + store.store(os, PASSWORD.toCharArray()); + } finally { + os.close(); + } + File passwordFile = null; + if (withPasswordFile) { + passwordFile = new File(dataDir, "keystore.pw"); + os = new FileOutputStream(passwordFile); + try { + passwordProps.store(os, ""); + } finally { + os.close(); + } + } + String providerParams; + if (withPasswordFile) { + providerParams = "jceks://" + storeFile.toURI().getPath() + "?passwordFile=" + + URLEncoder.encode(passwordFile.getAbsolutePath(), "UTF-8"); + } else { + providerParams = "jceks://" + storeFile.toURI().getPath() + "?password=" + PASSWORD; + } + return providerParams; + } + + public static FileStatus createMockFile(String fileName) { + Path mockPath = mock(Path.class); + when(mockPath.getName()).thenReturn(fileName); + FileStatus mockFileStatus = mock(FileStatus.class); + when(mockFileStatus.getPath()).thenReturn(mockPath); + return mockFileStatus; + } + + public static Path createMockPath(String tableName, String family) { + Path mockPath = mock(Path.class); + Path mockRegionDir = mock(Path.class); + Path mockTableDir = mock(Path.class); + Path mockNamespaceDir = mock(Path.class); + Path mockFamilyDir = mock(Path.class); + Path mockDataDir = mock(Path.class); + when(mockPath.getParent()).thenReturn(mockFamilyDir); + when(mockFamilyDir.getParent()).thenReturn(mockRegionDir); + when(mockRegionDir.getParent()).thenReturn(mockTableDir); + when(mockTableDir.getParent()).thenReturn(mockNamespaceDir); + when(mockNamespaceDir.getParent()).thenReturn(mockDataDir); + when(mockTableDir.getName()).thenReturn(tableName); + when(mockFamilyDir.getName()).thenReturn(family); + return mockPath; + } +} diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/TestKeyStoreKeyProvider.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/TestKeyStoreKeyProvider.java index a0304e6337fb..bb19d4222001 --- a/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/TestKeyStoreKeyProvider.java +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/TestKeyStoreKeyProvider.java @@ -17,12 +17,11 @@ */ package org.apache.hadoop.hbase.io.crypto; +import static org.apache.hadoop.hbase.io.crypto.KeymetaTestUtils.ALIAS; +import static org.apache.hadoop.hbase.io.crypto.KeymetaTestUtils.PASSWORD; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotNull; -import java.io.File; -import java.io.FileOutputStream; -import java.net.URLEncoder; import java.security.Key; import java.security.KeyStore; import java.security.MessageDigest; @@ -51,12 +50,8 @@ public class TestKeyStoreKeyProvider { HBaseClassTestRule.forClass(TestKeyStoreKeyProvider.class); static final HBaseCommonTestingUtil TEST_UTIL = new
HBaseCommonTestingUtil(); - static final String ALIAS = "test"; - static final String PASSWORD = "password"; static byte[] KEY; - static File storeFile; - static File passwordFile; protected KeyProvider provider; @@ -75,40 +70,21 @@ public static Collection parameters() { @Before public void setUp() throws Exception { KEY = MessageDigest.getInstance("SHA-256").digest(Bytes.toBytes(ALIAS)); - // Create a JKECS store containing a test secret key - KeyStore store = KeyStore.getInstance("JCEKS"); - store.load(null, PASSWORD.toCharArray()); - store.setEntry(ALIAS, new KeyStore.SecretKeyEntry(new SecretKeySpec(KEY, "AES")), - new KeyStore.PasswordProtection(withPasswordOnAlias ? PASSWORD.toCharArray() : new char[0])); - Properties p = new Properties(); - addCustomEntries(store, p); - // Create the test directory - String dataDir = TEST_UTIL.getDataTestDir().toString(); - new File(dataDir).mkdirs(); - // Write the keystore file - storeFile = new File(dataDir, "keystore.jks"); - FileOutputStream os = new FileOutputStream(storeFile); - try { - store.store(os, PASSWORD.toCharArray()); - } finally { - os.close(); - } - // Write the password file - passwordFile = new File(dataDir, "keystore.pw"); - os = new FileOutputStream(passwordFile); - try { - p.store(os, ""); - } finally { - os.close(); - } - + String providerParams = KeymetaTestUtils.setupTestKeyStore(TEST_UTIL, withPasswordOnAlias, + withPasswordFile, store -> { + Properties p = new Properties(); + try { + store.setEntry(ALIAS, new KeyStore.SecretKeyEntry(new SecretKeySpec(KEY, "AES")), + new KeyStore.PasswordProtection( + withPasswordOnAlias ? PASSWORD.toCharArray() : new char[0])); + addCustomEntries(store, p); + } catch (Exception e) { + throw new RuntimeException(e); + } + return p; + }); provider = createProvider(); - if (withPasswordFile) { - provider.init("jceks://" + storeFile.toURI().getPath() + "?passwordFile=" - + URLEncoder.encode(passwordFile.getAbsolutePath(), "UTF-8")); - } else { - provider.init("jceks://" + storeFile.toURI().getPath() + "?password=" + PASSWORD); - } + provider.init(providerParams); } protected KeyProvider createProvider() { diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/TestManagedKeyProvider.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/TestManagedKeyProvider.java index 472ce56405a9..405c5731be94 100644 --- a/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/TestManagedKeyProvider.java +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/TestManagedKeyProvider.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.hbase.io.crypto; +import static org.apache.hadoop.hbase.io.crypto.ManagedKeyData.KEY_GLOBAL_CUSTODIAN_BYTES; import static org.apache.hadoop.hbase.io.crypto.ManagedKeyStoreKeyProvider.KEY_METADATA_ALIAS; import static org.apache.hadoop.hbase.io.crypto.ManagedKeyStoreKeyProvider.KEY_METADATA_CUST; import static org.junit.Assert.assertEquals; @@ -26,14 +27,12 @@ import static org.junit.Assert.assertTrue; import java.security.KeyStore; -import java.security.MessageDigest; import java.util.Arrays; import java.util.Base64; import java.util.HashMap; import java.util.Map; import java.util.Properties; import java.util.UUID; -import javax.crypto.spec.SecretKeySpec; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseConfiguration; @@ -66,10 +65,10 @@ public static class TestManagedKeyStoreKeyProvider extends TestKeyStoreKeyProvid private static final String 
SYSTEM_KEY_ALIAS = "system-alias"; private Configuration conf = HBaseConfiguration.create(); - private int nPrefixes = 2; + private int nCustodians = 2; private ManagedKeyProvider managedKeyProvider; - private Map<Bytes, Bytes> prefix2key = new HashMap<>(); - private Map<Bytes, String> prefix2alias = new HashMap<>(); + private Map<Bytes, Bytes> cust2key = new HashMap<>(); + private Map<Bytes, String> cust2alias = new HashMap<>(); private String clusterId; private byte[] systemKey; @@ -86,41 +85,21 @@ protected KeyProvider createProvider() { protected void addCustomEntries(KeyStore store, Properties passwdProps) throws Exception { super.addCustomEntries(store, passwdProps); - for (int i = 0; i < nPrefixes; ++i) { - String prefix = "prefix+ " + i; - String alias = prefix + "-alias"; - byte[] key = MessageDigest.getInstance("SHA-256").digest(Bytes.toBytes(alias)); - prefix2alias.put(new Bytes(prefix.getBytes()), alias); - prefix2key.put(new Bytes(prefix.getBytes()), new Bytes(key)); - store.setEntry(alias, new KeyStore.SecretKeyEntry(new SecretKeySpec(key, "AES")), - new KeyStore.PasswordProtection( - withPasswordOnAlias ? PASSWORD.toCharArray() : new char[0])); - - String encPrefix = Base64.getEncoder().encodeToString(prefix.getBytes()); - String confKey = - HConstants.CRYPTO_MANAGED_KEY_STORE_CONF_KEY_PREFIX + encPrefix + "." + "alias"; - conf.set(confKey, alias); - - passwdProps.setProperty(alias, PASSWORD); - - clusterId = UUID.randomUUID().toString(); - systemKey = MessageDigest.getInstance("SHA-256").digest(Bytes.toBytes(SYSTEM_KEY_ALIAS)); - store.setEntry(SYSTEM_KEY_ALIAS, - new KeyStore.SecretKeyEntry(new SecretKeySpec(systemKey, "AES")), - new KeyStore.PasswordProtection( - withPasswordOnAlias ? PASSWORD.toCharArray() : new char[0])); - - conf.set(HConstants.CRYPTO_MANAGED_KEY_STORE_SYSTEM_KEY_NAME_CONF_KEY, SYSTEM_KEY_ALIAS); - - passwdProps.setProperty(SYSTEM_KEY_ALIAS, PASSWORD); + for (int i = 0; i < nCustodians; ++i) { + String custodian = "custodian+ " + i; + String alias = custodian + "-alias"; + KeymetaTestUtils.addEntry(conf, 256, store, alias, custodian, withPasswordOnAlias, cust2key, + cust2alias, passwdProps); } - } - private void addEntry(String alias, String prefix) { - String encPrefix = Base64.getEncoder().encodeToString(prefix.getBytes()); - String confKey = - HConstants.CRYPTO_MANAGED_KEY_STORE_CONF_KEY_PREFIX + encPrefix + "."
+ "alias"; - conf.set(confKey, alias); + clusterId = UUID.randomUUID().toString(); + KeymetaTestUtils.addEntry(conf, 256, store, SYSTEM_KEY_ALIAS, clusterId, withPasswordOnAlias, + cust2key, cust2alias, passwdProps); + systemKey = cust2key.get(new Bytes(clusterId.getBytes())).get(); + conf.set(HConstants.CRYPTO_MANAGED_KEY_STORE_SYSTEM_KEY_NAME_CONF_KEY, SYSTEM_KEY_ALIAS); + + KeymetaTestUtils.addEntry(conf, 256, store, "global-cust-alias", "*", withPasswordOnAlias, + cust2key, cust2alias, passwdProps); } @Test @@ -133,46 +112,54 @@ public void testMissingConfig() throws Exception { @Test public void testGetManagedKey() throws Exception { - for (Bytes prefix : prefix2key.keySet()) { + for (Bytes cust : cust2key.keySet()) { ManagedKeyData keyData = - managedKeyProvider.getManagedKey(prefix.get(), ManagedKeyData.KEY_SPACE_GLOBAL); - assertKeyData(keyData, ManagedKeyState.ACTIVE, prefix2key.get(prefix).get(), prefix.get(), - prefix2alias.get(prefix)); + managedKeyProvider.getManagedKey(cust.get(), ManagedKeyData.KEY_SPACE_GLOBAL); + assertKeyData(keyData, ManagedKeyState.ACTIVE, cust2key.get(cust).get(), cust.get(), + cust2alias.get(cust)); } } + @Test + public void testGetGlobalCustodianKey() throws Exception { + byte[] globalCustodianKey = cust2key.get(new Bytes(KEY_GLOBAL_CUSTODIAN_BYTES)).get(); + ManagedKeyData keyData = managedKeyProvider.getManagedKey(KEY_GLOBAL_CUSTODIAN_BYTES, + ManagedKeyData.KEY_SPACE_GLOBAL); + assertKeyData(keyData, ManagedKeyState.ACTIVE, globalCustodianKey, KEY_GLOBAL_CUSTODIAN_BYTES, + "global-cust-alias"); + } + @Test public void testGetInactiveKey() throws Exception { - Bytes firstPrefix = prefix2key.keySet().iterator().next(); - String encPrefix = Base64.getEncoder().encodeToString(firstPrefix.get()); - conf.set(HConstants.CRYPTO_MANAGED_KEY_STORE_CONF_KEY_PREFIX + encPrefix + ".active", - "false"); + Bytes firstCust = cust2key.keySet().iterator().next(); + String encCust = Base64.getEncoder().encodeToString(firstCust.get()); + conf.set(HConstants.CRYPTO_MANAGED_KEY_STORE_CONF_KEY_PREFIX + encCust + ".active", "false"); ManagedKeyData keyData = - managedKeyProvider.getManagedKey(firstPrefix.get(), ManagedKeyData.KEY_SPACE_GLOBAL); + managedKeyProvider.getManagedKey(firstCust.get(), ManagedKeyData.KEY_SPACE_GLOBAL); assertNotNull(keyData); - assertKeyData(keyData, ManagedKeyState.INACTIVE, prefix2key.get(firstPrefix).get(), - firstPrefix.get(), prefix2alias.get(firstPrefix)); + assertKeyData(keyData, ManagedKeyState.INACTIVE, cust2key.get(firstCust).get(), + firstCust.get(), cust2alias.get(firstCust)); } @Test public void testGetInvalidKey() throws Exception { - byte[] invalidPrefixBytes = "invalid".getBytes(); + byte[] invalidCustBytes = "invalid".getBytes(); ManagedKeyData keyData = - managedKeyProvider.getManagedKey(invalidPrefixBytes, ManagedKeyData.KEY_SPACE_GLOBAL); + managedKeyProvider.getManagedKey(invalidCustBytes, ManagedKeyData.KEY_SPACE_GLOBAL); assertNotNull(keyData); - assertKeyData(keyData, ManagedKeyState.FAILED, null, invalidPrefixBytes, null); + assertKeyData(keyData, ManagedKeyState.FAILED, null, invalidCustBytes, null); } @Test public void testGetDisabledKey() throws Exception { - byte[] invalidPrefix = new byte[] { 1, 2, 3 }; - String invalidPrefixEnc = ManagedKeyProvider.encodeToStr(invalidPrefix); - conf.set(HConstants.CRYPTO_MANAGED_KEY_STORE_CONF_KEY_PREFIX + invalidPrefixEnc + ".active", + byte[] invalidCust = new byte[] { 1, 2, 3 }; + String invalidCustEnc = ManagedKeyProvider.encodeToStr(invalidCust); + 
conf.set(HConstants.CRYPTO_MANAGED_KEY_STORE_CONF_KEY_PREFIX + invalidCustEnc + ".active", "false"); ManagedKeyData keyData = - managedKeyProvider.getManagedKey(invalidPrefix, ManagedKeyData.KEY_SPACE_GLOBAL); + managedKeyProvider.getManagedKey(invalidCust, ManagedKeyData.KEY_SPACE_GLOBAL); assertNotNull(keyData); - assertKeyData(keyData, ManagedKeyState.DISABLED, null, invalidPrefix, null); + assertKeyData(keyData, ManagedKeyState.DISABLED, null, invalidCust, null); } @Test @@ -192,31 +179,31 @@ public void testGetSystemKey() throws Exception { @Test public void testUnwrapInvalidKey() throws Exception { String invalidAlias = "invalidAlias"; - byte[] invalidPrefix = new byte[] { 1, 2, 3 }; - String invalidPrefixEnc = ManagedKeyProvider.encodeToStr(invalidPrefix); + byte[] invalidCust = new byte[] { 1, 2, 3 }; + String invalidCustEnc = ManagedKeyProvider.encodeToStr(invalidCust); String invalidMetadata = - ManagedKeyStoreKeyProvider.generateKeyMetadata(invalidAlias, invalidPrefixEnc); + ManagedKeyStoreKeyProvider.generateKeyMetadata(invalidAlias, invalidCustEnc); ManagedKeyData keyData = managedKeyProvider.unwrapKey(invalidMetadata, null); assertNotNull(keyData); - assertKeyData(keyData, ManagedKeyState.FAILED, null, invalidPrefix, invalidAlias); + assertKeyData(keyData, ManagedKeyState.FAILED, null, invalidCust, invalidAlias); } @Test public void testUnwrapDisabledKey() throws Exception { String invalidAlias = "invalidAlias"; - byte[] invalidPrefix = new byte[] { 1, 2, 3 }; - String invalidPrefixEnc = ManagedKeyProvider.encodeToStr(invalidPrefix); - conf.set(HConstants.CRYPTO_MANAGED_KEY_STORE_CONF_KEY_PREFIX + invalidPrefixEnc + ".active", + byte[] invalidCust = new byte[] { 1, 2, 3 }; + String invalidCustEnc = ManagedKeyProvider.encodeToStr(invalidCust); + conf.set(HConstants.CRYPTO_MANAGED_KEY_STORE_CONF_KEY_PREFIX + invalidCustEnc + ".active", "false"); String invalidMetadata = - ManagedKeyStoreKeyProvider.generateKeyMetadata(invalidAlias, invalidPrefixEnc); + ManagedKeyStoreKeyProvider.generateKeyMetadata(invalidAlias, invalidCustEnc); ManagedKeyData keyData = managedKeyProvider.unwrapKey(invalidMetadata, null); assertNotNull(keyData); - assertKeyData(keyData, ManagedKeyState.DISABLED, null, invalidPrefix, invalidAlias); + assertKeyData(keyData, ManagedKeyState.DISABLED, null, invalidCust, invalidAlias); } private void assertKeyData(ManagedKeyData keyData, ManagedKeyState expKeyState, byte[] key, - byte[] prefixBytes, String alias) throws Exception { + byte[] custBytes, String alias) throws Exception { assertNotNull(keyData); assertEquals(expKeyState, keyData.getKeyState()); if (key == null) { @@ -229,9 +216,9 @@ private void assertKeyData(ManagedKeyData keyData, ManagedKeyState expKeyState, Map keyMetadata = GsonUtil.getDefaultInstance().fromJson(keyData.getKeyMetadata(), HashMap.class); assertNotNull(keyMetadata); - assertEquals(new Bytes(prefixBytes), keyData.getKeyCustodian()); + assertEquals(new Bytes(custBytes), keyData.getKeyCustodian()); assertEquals(alias, keyMetadata.get(KEY_METADATA_ALIAS)); - assertEquals(Base64.getEncoder().encodeToString(prefixBytes), + assertEquals(Base64.getEncoder().encodeToString(custBytes), keyMetadata.get(KEY_METADATA_CUST)); assertEquals(keyData, managedKeyProvider.unwrapKey(keyData.getKeyMetadata(), null)); } diff --git a/hbase-protocol-shaded/src/main/protobuf/server/io/HFile.proto b/hbase-protocol-shaded/src/main/protobuf/server/io/HFile.proto index fd1b9b3680d8..26a343a5d04f 100644 --- 
a/hbase-protocol-shaded/src/main/protobuf/server/io/HFile.proto +++ b/hbase-protocol-shaded/src/main/protobuf/server/io/HFile.proto @@ -51,4 +51,7 @@ message FileTrailerProto { optional string comparator_class_name = 11; optional uint32 compression_codec = 12; optional bytes encryption_key = 13; + optional string key_namespace = 14; + optional string kek_metadata = 15; + optional uint64 kek_checksum = 16; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/HBaseServerBase.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/HBaseServerBase.java index 12cc7433e7be..0993fc0f09da 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/HBaseServerBase.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/HBaseServerBase.java @@ -52,6 +52,7 @@ import org.apache.hadoop.hbase.http.InfoServer; import org.apache.hadoop.hbase.io.util.MemorySizeUtil; import org.apache.hadoop.hbase.ipc.RpcServerInterface; +import org.apache.hadoop.hbase.keymeta.KeyManagementService; import org.apache.hadoop.hbase.keymeta.KeymetaAdmin; import org.apache.hadoop.hbase.keymeta.KeymetaAdminImpl; import org.apache.hadoop.hbase.keymeta.ManagedKeyDataCache; @@ -92,7 +93,7 @@ */ @InterfaceAudience.Private public abstract class HBaseServerBase<R extends HBaseRpcServicesBase<?>> extends Thread - implements Server, ConfigurationObserver, ConnectionRegistryEndpoint { + implements Server, ConfigurationObserver, ConnectionRegistryEndpoint, KeyManagementService { private static final Logger LOG = LoggerFactory.getLogger(HBaseServerBase.class); @@ -661,6 +662,11 @@ public void updateConfiguration() throws IOException { postUpdateConfiguration(); } + @Override + public KeyManagementService getKeyManagementService() { + return this; + } + private void preUpdateConfiguration() throws IOException { CoprocessorHost<?, ?> coprocessorHost = getCoprocessorHost(); if (coprocessorHost instanceof RegionServerCoprocessorHost) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/MockRegionServerServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/MockRegionServerServices.java index 39d09ab170f3..4b5d36382eff 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/MockRegionServerServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/MockRegionServerServices.java @@ -38,6 +38,7 @@ import org.apache.hadoop.hbase.fs.HFileSystem; import org.apache.hadoop.hbase.io.hfile.BlockCache; import org.apache.hadoop.hbase.ipc.RpcServerInterface; +import org.apache.hadoop.hbase.keymeta.KeyManagementService; import org.apache.hadoop.hbase.keymeta.KeymetaAdmin; import org.apache.hadoop.hbase.keymeta.ManagedKeyDataCache; import org.apache.hadoop.hbase.keymeta.SystemKeyCache; @@ -404,4 +405,9 @@ public AsyncClusterConnection getAsyncClusterConnection() { public RegionReplicationBufferManager getRegionReplicationBufferManager() { return null; } + + @Override + public KeyManagementService getKeyManagementService() { + return this; + } }
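The net effect of the `HBaseServerBase`/`Server` changes in this patch is that the three key-management accessors formerly on `Server` are consolidated behind a single `KeyManagementService`. A sketch of both lookup paths (method names come from the patch; the wrapper class is hypothetical):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.keymeta.KeyManagementService;
import org.apache.hadoop.hbase.keymeta.ManagedKeyDataCache;
import org.apache.hadoop.hbase.keymeta.SystemKeyCache;

class KeyServiceLookupSketch {
  // In-process: every Server (master, region server) now exposes the service,
  // and HBaseServerBase simply returns itself.
  static ManagedKeyDataCache managedKeys(Server server) {
    KeyManagementService kms = server.getKeyManagementService();
    return kms.getManagedKeyDataCache();
  }

  // Out-of-process readers get a standalone instance backed by conf + fs
  // (see KeyManagementService.createDefault later in this patch).
  static SystemKeyCache systemKeys(Configuration conf, FileSystem fs) {
    return KeyManagementService.createDefault(conf, fs).getSystemKeyCache();
  }
}
```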
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/Server.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/Server.java index c1a6d7dc9ec8..ba258d14add9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/Server.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/Server.java @@ -23,9 +23,7 @@ import org.apache.hadoop.hbase.client.AsyncClusterConnection; import org.apache.hadoop.hbase.client.AsyncConnection; import org.apache.hadoop.hbase.client.Connection; -import org.apache.hadoop.hbase.keymeta.KeymetaAdmin; -import org.apache.hadoop.hbase.keymeta.ManagedKeyDataCache; -import org.apache.hadoop.hbase.keymeta.SystemKeyCache; +import org.apache.hadoop.hbase.keymeta.KeyManagementService; import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.yetus.audience.InterfaceAudience; @@ -86,15 +84,6 @@ default AsyncConnection getAsyncConnection() { /** Returns The {@link ChoreService} instance for this server */ ChoreService getChoreService(); - /** Returns the cache for cluster keys. */ - public SystemKeyCache getSystemKeyCache(); - - /** Returns the cache for managed keys. */ - public ManagedKeyDataCache getManagedKeyDataCache(); - - /** Returns the admin for keymeta. */ - public KeymetaAdmin getKeymetaAdmin(); - /** Returns Return the FileSystem object used (can return null!). */ // TODO: Distinguish between "dataFs" and "walFs". default FileSystem getFileSystem() { @@ -116,4 +105,7 @@ default FileSystem getFileSystem() { default boolean isStopping() { return false; } + + /** Returns the KeyManagementService instance for this server. */ + KeyManagementService getKeyManagementService(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientSideRegionScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientSideRegionScanner.java index df99fd403387..eb7c77554b02 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientSideRegionScanner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientSideRegionScanner.java @@ -63,7 +63,7 @@ public ClientSideRegionScanner(Configuration conf, FileSystem fs, Path rootDir, // open region from the snapshot directory region = HRegion.newHRegion(CommonFSUtils.getTableDir(rootDir, htd.getTableName()), null, fs, - conf, hri, htd, null); + conf, hri, htd, null, null); region.setRestoredRegion(true); // non RS process does not have a block cache, and this a client side scanner, // create one for MapReduce jobs to cache the INDEX block by setting to use diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HFileLink.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HFileLink.java index bd5fac1c3c45..85201ccd8bdf 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HFileLink.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HFileLink.java @@ -174,6 +174,15 @@ public Path getMobPath() { return this.mobPath; } + /** + * Get the family name and table name from the origin path. + * @return a pair of the family name and the table name, in that order + */ + public Pair<String, String> getTableNameAndFamilyName() { + return new Pair<>(this.originPath.getParent().getName(), + this.originPath.getParent().getParent().getParent().getName()); + } + /** * @param path Path to check. + * @return True if the path is a HFileLink. diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/FixedFileTrailer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/FixedFileTrailer.java index 74b560022a8b..d3337d24712c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/FixedFileTrailer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/FixedFileTrailer.java @@ -130,6 +130,21 @@ public class FixedFileTrailer { */ private byte[] encryptionKey; + /** + * The key namespace + */ + private String keyNamespace; + + /** + * The KEK checksum + */ + private long kekChecksum; + + /** + * The KEK metadata + */ + private String kekMetadata; + /** * The {@link HFile} format major version.
*/ @@ -211,6 +226,15 @@ HFileProtos.FileTrailerProto toProtobuf() { if (encryptionKey != null) { builder.setEncryptionKey(UnsafeByteOperations.unsafeWrap(encryptionKey)); } + if (keyNamespace != null) { + builder.setKeyNamespace(keyNamespace); + } + if (kekMetadata != null) { + builder.setKekMetadata(kekMetadata); + } + if (kekChecksum != 0) { + builder.setKekChecksum(kekChecksum); + } return builder.build(); } @@ -313,6 +337,15 @@ void deserializeFromPB(DataInputStream inputStream) throws IOException { if (trailerProto.hasEncryptionKey()) { encryptionKey = trailerProto.getEncryptionKey().toByteArray(); } + if (trailerProto.hasKeyNamespace()) { + keyNamespace = trailerProto.getKeyNamespace(); + } + if (trailerProto.hasKekMetadata()) { + kekMetadata = trailerProto.getKekMetadata(); + } + if (trailerProto.hasKekChecksum()) { + kekChecksum = trailerProto.getKekChecksum(); + } } /** @@ -362,6 +395,9 @@ public String toString() { if (majorVersion >= 3) { append(sb, "encryptionKey=" + (encryptionKey != null ? "PRESENT" : "NONE")); } + if (keyNamespace != null) { + append(sb, "keyNamespace=" + keyNamespace); + } append(sb, "majorVersion=" + majorVersion); append(sb, "minorVersion=" + minorVersion); @@ -641,10 +677,34 @@ public byte[] getEncryptionKey() { return encryptionKey; } + public String getKeyNamespace() { + return keyNamespace; + } + + public void setKeyNamespace(String keyNamespace) { + this.keyNamespace = keyNamespace; + } + + public void setKEKChecksum(long kekChecksum) { + this.kekChecksum = kekChecksum; + } + + public long getKEKChecksum() { + return kekChecksum; + } + public void setEncryptionKey(byte[] keyBytes) { this.encryptionKey = keyBytes; } + public String getKEKMetadata() { + return kekMetadata; + } + + public void setKEKMetadata(String kekMetadata) { + this.kekMetadata = kekMetadata; + } + /** * Extracts the major version for a 4-byte serialized version data. The major version is the 3 * least significant bytes diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java index a99eac4085e4..6392b36ef12f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java @@ -41,9 +41,13 @@ import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; import org.apache.hadoop.hbase.io.hfile.ReaderContext.ReaderType; import org.apache.hadoop.hbase.ipc.RpcServer; +import org.apache.hadoop.hbase.keymeta.KeyManagementService; +import org.apache.hadoop.hbase.keymeta.ManagedKeyDataCache; +import org.apache.hadoop.hbase.keymeta.SystemKeyCache; import org.apache.hadoop.hbase.regionserver.CellSink; import org.apache.hadoop.hbase.regionserver.ShipperListener; import org.apache.hadoop.hbase.regionserver.TimeRangeTracker; +import org.apache.hadoop.hbase.security.SecurityUtil; import org.apache.hadoop.hbase.util.BloomFilterWriter; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.FSUtils; @@ -554,10 +558,18 @@ public static Reader createReader(FileSystem fs, Path path, CacheConfig cacheCon boolean primaryReplicaReader, Configuration conf) throws IOException { Preconditions.checkNotNull(cacheConf, "Cannot create Reader with null CacheConf"); FSDataInputStreamWrapper stream = new FSDataInputStreamWrapper(fs, path); + KeyManagementService keyManagementService = SecurityUtil.isKeyManagementEnabled(conf) + ? 
KeyManagementService.createDefault(conf, fs) + : null; + ManagedKeyDataCache managedKeyDataCache = + keyManagementService != null ? keyManagementService.getManagedKeyDataCache() : null; + SystemKeyCache systemKeyCache = + keyManagementService != null ? keyManagementService.getSystemKeyCache() : null; ReaderContext context = new ReaderContextBuilder().withFilePath(path).withInputStreamWrapper(stream) .withFileSize(fs.getFileStatus(path).getLen()).withFileSystem(stream.getHfs()) - .withPrimaryReplicaReader(primaryReplicaReader).withReaderType(ReaderType.PREAD).build(); + .withPrimaryReplicaReader(primaryReplicaReader).withReaderType(ReaderType.PREAD) + .withManagedKeyDataCache(managedKeyDataCache).withSystemKeyCache(systemKeyCache).build(); HFileInfo fileInfo = new HFileInfo(context, conf); Reader reader = createReader(context, fileInfo, cacheConf, conf); fileInfo.initMetaAndIndex(reader); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileInfo.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileInfo.java index 2386e8d82a56..b3da98f13434 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileInfo.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileInfo.java @@ -22,7 +22,6 @@ import java.io.DataOutputStream; import java.io.IOException; import java.io.SequenceInputStream; -import java.security.Key; import java.util.ArrayList; import java.util.Collection; import java.util.Comparator; @@ -39,10 +38,8 @@ import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.io.crypto.Cipher; -import org.apache.hadoop.hbase.io.crypto.Encryption; import org.apache.hadoop.hbase.protobuf.ProtobufMagic; -import org.apache.hadoop.hbase.security.EncryptionUtil; +import org.apache.hadoop.hbase.security.SecurityUtil; import org.apache.hadoop.hbase.util.Bytes; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; @@ -351,7 +348,7 @@ public void initTrailerAndContext(ReaderContext context, Configuration conf) thr context.getInputStreamWrapper().getStream(isHBaseChecksum), context.getFileSize()); Path path = context.getFilePath(); checkFileVersion(path); - this.hfileContext = createHFileContext(path, trailer, conf); + this.hfileContext = createHFileContext(context, path, trailer, conf); context.getInputStreamWrapper().unbuffer(); } catch (Throwable t) { IOUtils.closeQuietly(context.getInputStreamWrapper(), @@ -409,30 +406,16 @@ public void initMetaAndIndex(HFile.Reader reader) throws IOException { initialized = true; } - private HFileContext createHFileContext(Path path, FixedFileTrailer trailer, Configuration conf) - throws IOException { - HFileContextBuilder builder = new HFileContextBuilder().withHBaseCheckSum(true) - .withHFileName(path.getName()).withCompression(trailer.getCompressionCodec()) + private HFileContext createHFileContext(ReaderContext readerContext, Path path, + FixedFileTrailer trailer, Configuration conf) throws IOException { + return new HFileContextBuilder().withHBaseCheckSum(true).withHFileName(path.getName()) + .withCompression(trailer.getCompressionCodec()) .withDecompressionContext( trailer.getCompressionCodec().getHFileDecompressionContextForConfiguration(conf)) - .withCellComparator(FixedFileTrailer.createComparator(trailer.getComparatorClassName())); - // Check for any key material available - byte[] keyBytes = trailer.getEncryptionKey(); - if (keyBytes != null) { - 
Encryption.Context cryptoContext = Encryption.newContext(conf); - Key key = EncryptionUtil.unwrapKey(conf, keyBytes); - // Use the algorithm the key wants - Cipher cipher = Encryption.getCipher(conf, key.getAlgorithm()); - if (cipher == null) { - throw new IOException( - "Cipher '" + key.getAlgorithm() + "' is not available" + ", path=" + path); - } - cryptoContext.setCipher(cipher); - cryptoContext.setKey(key); - builder.withEncryptionContext(cryptoContext); - } - HFileContext context = builder.build(); - return context; + .withCellComparator(FixedFileTrailer.createComparator(trailer.getComparatorClassName())) + .withEncryptionContext(SecurityUtil.createEncryptionContext(conf, path, trailer, + readerContext.getManagedKeyDataCache(), readerContext.getSystemKeyCache())) + .build(); } private void loadMetaInfo(HFileBlock.BlockIterator blockIter, HFileContext hfileContext) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.java index 684aee3beaca..2b74d177a4fe 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.java @@ -27,6 +27,7 @@ import java.io.IOException; import java.net.InetSocketAddress; import java.nio.ByteBuffer; +import java.security.Key; import java.util.ArrayList; import java.util.List; import java.util.Optional; @@ -48,6 +49,7 @@ import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.io.compress.Compression; import org.apache.hadoop.hbase.io.crypto.Encryption; +import org.apache.hadoop.hbase.io.crypto.ManagedKeyData; import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; import org.apache.hadoop.hbase.io.encoding.IndexBlockEncoding; import org.apache.hadoop.hbase.io.hfile.HFileBlock.BlockWritable; @@ -877,12 +879,33 @@ protected void finishClose(FixedFileTrailer trailer) throws IOException { // Write out encryption metadata before finalizing if we have a valid crypto context Encryption.Context cryptoContext = hFileContext.getEncryptionContext(); if (cryptoContext != Encryption.Context.NONE) { + String wrapperSubject = null; + Key encKey = null; + Key wrapperKey = null; + ManagedKeyData kekData = cryptoContext.getKEKData(); + String keyNamespace = cryptoContext.getKeyNamespace(); + String kekMetadata = null; + long kekChecksum = 0; + if (kekData != null) { + kekMetadata = kekData.getKeyMetadata(); + kekChecksum = kekData.getKeyChecksum(); + wrapperKey = kekData.getTheKey(); + encKey = cryptoContext.getKey(); + } else { + wrapperSubject = cryptoContext.getConf().get(HConstants.CRYPTO_MASTERKEY_NAME_CONF_KEY, + User.getCurrent().getShortName()); + encKey = cryptoContext.getKey(); + } // Wrap the context's key and write it as the encryption metadata, the wrapper includes // all information needed for decryption - trailer.setEncryptionKey(EncryptionUtil.wrapKey( - cryptoContext.getConf(), cryptoContext.getConf() - .get(HConstants.CRYPTO_MASTERKEY_NAME_CONF_KEY, User.getCurrent().getShortName()), - cryptoContext.getKey())); + if (encKey != null) { + byte[] wrappedKey = + EncryptionUtil.wrapKey(cryptoContext.getConf(), wrapperSubject, encKey, wrapperKey); + trailer.setEncryptionKey(wrappedKey); + } + trailer.setKeyNamespace(keyNamespace); + trailer.setKEKMetadata(kekMetadata); + trailer.setKEKChecksum(kekChecksum); } // Now we can finish the close trailer.setMetaIndexCount(metaNames.size());
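With the `finishClose` change above, an encrypted HFile's trailer now records which KEK wrapped the data key (`kek_metadata`/`kek_checksum`) plus the key namespace, alongside the wrapped key itself. A hedged sketch of how a reader might distinguish the two wrapping modes from a deserialized trailer (helper class is hypothetical; accessors are from the patch):

```java
import org.apache.hadoop.hbase.io.hfile.FixedFileTrailer;

final class TrailerKeyInfoSketch {
  /**
   * True when the file's data key was wrapped by a managed KEK (the new
   * trailer fields are populated), false for legacy master-key wrapping.
   */
  static boolean usesManagedKek(FixedFileTrailer trailer) {
    if (trailer.getEncryptionKey() == null) {
      return false; // unencrypted file: no wrapped key in the trailer
    }
    // kek_metadata is only written when the writer's encryption context
    // carried a ManagedKeyData KEK; the checksum guards against KEK drift.
    return trailer.getKEKMetadata() != null && trailer.getKEKChecksum() != 0;
  }

  static String keyNamespaceOrDefault(FixedFileTrailer trailer, String dflt) {
    return trailer.getKeyNamespace() != null ? trailer.getKeyNamespace() : dflt;
  }
}
```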
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/ReaderContext.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/ReaderContext.java index d6f711d866eb..ac2031b723a1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/ReaderContext.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/ReaderContext.java @@ -21,6 +21,8 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.fs.HFileSystem; import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper; +import org.apache.hadoop.hbase.keymeta.ManagedKeyDataCache; +import org.apache.hadoop.hbase.keymeta.SystemKeyCache; import org.apache.yetus.audience.InterfaceAudience; /** @@ -41,9 +43,12 @@ public enum ReaderType { private final boolean primaryReplicaReader; private final ReaderType type; private final boolean preadAllBytes; + private final SystemKeyCache systemKeyCache; + private final ManagedKeyDataCache managedKeyDataCache; public ReaderContext(Path filePath, FSDataInputStreamWrapper fsdis, long fileSize, - HFileSystem hfs, boolean primaryReplicaReader, ReaderType type) { + HFileSystem hfs, boolean primaryReplicaReader, ReaderType type, SystemKeyCache systemKeyCache, + ManagedKeyDataCache managedKeyDataCache) { this.filePath = filePath; this.fsdis = fsdis; this.fileSize = fileSize; @@ -52,6 +57,8 @@ public ReaderContext(Path filePath, FSDataInputStreamWrapper fsdis, long fileSiz this.type = type; this.preadAllBytes = hfs.getConf().getBoolean(HConstants.HFILE_PREAD_ALL_BYTES_ENABLED_KEY, HConstants.HFILE_PREAD_ALL_BYTES_ENABLED_DEFAULT); + this.systemKeyCache = systemKeyCache; + this.managedKeyDataCache = managedKeyDataCache; } public Path getFilePath() { @@ -81,4 +88,12 @@ public ReaderType getReaderType() { public boolean isPreadAllBytes() { return preadAllBytes; } + + public SystemKeyCache getSystemKeyCache() { + return this.systemKeyCache; + } + + public ManagedKeyDataCache getManagedKeyDataCache() { + return this.managedKeyDataCache; + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/ReaderContextBuilder.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/ReaderContextBuilder.java index 718f7fcb78a6..1490299ab1f5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/ReaderContextBuilder.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/ReaderContextBuilder.java @@ -26,6 +26,8 @@ import org.apache.hadoop.hbase.fs.HFileSystem; import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper; import org.apache.hadoop.hbase.io.hfile.ReaderContext.ReaderType; +import org.apache.hadoop.hbase.keymeta.ManagedKeyDataCache; +import org.apache.hadoop.hbase.keymeta.SystemKeyCache; import org.apache.yetus.audience.InterfaceAudience; /** @@ -39,6 +41,8 @@ public class ReaderContextBuilder { private HFileSystem hfs; private boolean primaryReplicaReader = true; private ReaderType type = ReaderType.PREAD; + private SystemKeyCache systemKeyCache; + private ManagedKeyDataCache managedKeyDataCache; public ReaderContextBuilder() { } @@ -53,6 +57,8 @@ private ReaderContextBuilder(ReaderContext readerContext) { this.fileSize = readerContext.getFileSize(); this.hfs = readerContext.getFileSystem(); this.type = readerContext.getReaderType(); + this.systemKeyCache = readerContext.getSystemKeyCache(); + this.managedKeyDataCache = readerContext.getManagedKeyDataCache(); } public ReaderContextBuilder withFilePath(Path filePath) { @@ -101,9 +107,20 @@ public ReaderContextBuilder
withFileSystemAndPath(FileSystem fs, Path filePath) return this; } + public ReaderContextBuilder withManagedKeyDataCache(ManagedKeyDataCache managedKeyDataCache) { + this.managedKeyDataCache = managedKeyDataCache; + return this; + } + + public ReaderContextBuilder withSystemKeyCache(SystemKeyCache systemKeyCache) { + this.systemKeyCache = systemKeyCache; + return this; + } + public ReaderContext build() { validateFields(); - return new ReaderContext(filePath, fsdis, fileSize, hfs, primaryReplicaReader, type); + return new ReaderContext(filePath, fsdis, fileSize, hfs, primaryReplicaReader, type, + systemKeyCache, managedKeyDataCache); } private void validateFields() throws IllegalArgumentException { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/KeyManagementBase.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/KeyManagementBase.java index 1e4ee2a3e796..957c3c8f726d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/KeyManagementBase.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/KeyManagementBase.java @@ -21,7 +21,6 @@ import java.security.KeyException; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.io.crypto.Encryption; import org.apache.hadoop.hbase.io.crypto.KeyProvider; import org.apache.hadoop.hbase.io.crypto.ManagedKeyData; @@ -38,20 +37,19 @@ public abstract class KeyManagementBase { protected static final Logger LOG = LoggerFactory.getLogger(KeyManagementBase.class); - private Server server; + private KeyManagementService keyManagementService; private final Configuration configuration; private Boolean isDynamicLookupEnabled; private Boolean isKeyManagementEnabled; - private Integer perCustNamespaceActiveKeyCount; /** * Construct with a server instance. Configuration is derived from the server. * @param server the server instance */ - public KeyManagementBase(Server server) { - this(server.getConfiguration()); - this.server = server; + public KeyManagementBase(KeyManagementService keyManagementService) { + this(keyManagementService.getConfiguration()); + this.keyManagementService = keyManagementService; } /** @@ -65,8 +63,8 @@ public KeyManagementBase(Configuration configuration) { this.configuration = configuration; } - protected Server getServer() { - return server; + protected KeyManagementService getKeyManagementService() { + return keyManagementService; } protected Configuration getConfiguration() { @@ -150,7 +148,7 @@ protected ManagedKeyData retrieveActiveKey(String encKeyCust, byte[] key_cust, LOG.info( "retrieveManagedKey: got managed key with status: {} and metadata: {} for " + "(custodian: {}, namespace: {})", - pbeKey.getKeyState(), pbeKey.getKeyMetadata(), encKeyCust, keyNamespace); + pbeKey.getKeyState(), pbeKey.getKeyMetadata(), encKeyCust, pbeKey.getKeyNamespace()); if (accessor != null) { accessor.addKey(pbeKey); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/KeyManagementService.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/KeyManagementService.java new file mode 100644 index 000000000000..bdb76f5bbe6d --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/KeyManagementService.java @@ -0,0 +1,78 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.keymeta; + +import java.io.IOException; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.yetus.audience.InterfaceAudience; + +@InterfaceAudience.Private +public interface KeyManagementService { + class DefaultKeyManagementService implements KeyManagementService { + private final Configuration configuration; + private final ManagedKeyDataCache managedKeyDataCache; + private final SystemKeyCache systemKeyCache; + + public DefaultKeyManagementService(Configuration configuration, FileSystem fs) { + this.configuration = configuration; + this.managedKeyDataCache = new ManagedKeyDataCache(configuration, null); + try { + this.systemKeyCache = SystemKeyCache.createCache(configuration, fs); + } catch (IOException e) { + throw new RuntimeException("Failed to create system key cache", e); + } + } + + @Override + public SystemKeyCache getSystemKeyCache() { + return systemKeyCache; + } + + @Override + public ManagedKeyDataCache getManagedKeyDataCache() { + return managedKeyDataCache; + } + + @Override + public KeymetaAdmin getKeymetaAdmin() { + throw new UnsupportedOperationException("KeymetaAdmin is not supported"); + } + + @Override + public Configuration getConfiguration() { + return configuration; + } + } + + static KeyManagementService createDefault(Configuration configuration, FileSystem fs) { + return new DefaultKeyManagementService(configuration, fs); + } + + /** Returns the cache for cluster keys. */ + SystemKeyCache getSystemKeyCache(); + + /** Returns the cache for managed keys. */ + ManagedKeyDataCache getManagedKeyDataCache(); + + /** Returns the admin for keymeta. */ + KeymetaAdmin getKeymetaAdmin(); + + /** Returns the configuration. */ + Configuration getConfiguration(); +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/KeyNamespaceUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/KeyNamespaceUtil.java new file mode 100644 index 000000000000..52b6adddc6f7 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/KeyNamespaceUtil.java @@ -0,0 +1,90 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License.
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.keymeta; + +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; +import org.apache.hadoop.hbase.client.TableDescriptor; +import org.apache.hadoop.hbase.regionserver.StoreContext; +import org.apache.hadoop.hbase.regionserver.StoreFileInfo; +import org.apache.yetus.audience.InterfaceAudience; +import org.apache.yetus.audience.InterfaceStability; + +import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; + +/** + * Utility class for constructing key namespaces used in key management operations. + */ +@InterfaceAudience.Private +@InterfaceStability.Evolving +public class KeyNamespaceUtil { + + /** + * Construct a key namespace from a table descriptor and column family descriptor. + * @param tableDescriptor The table descriptor + * @param family The column family descriptor + * @return The constructed key namespace + */ + public static String constructKeyNamespace(TableDescriptor tableDescriptor, + ColumnFamilyDescriptor family) { + return tableDescriptor.getTableName().getNameAsString() + "/" + family.getNameAsString(); + } + + /** + * Construct a key namespace from a store context. + * @param storeContext The store context + * @return The constructed key namespace + */ + public static String constructKeyNamespace(StoreContext storeContext) { + return storeContext.getTableName().getNameAsString() + "/" + + storeContext.getFamily().getNameAsString(); + } + + /** + * Construct a key namespace by deriving table name and family name from a store file info. + * @param fileInfo The store file info + * @return The constructed key namespace + */ + public static String constructKeyNamespace(StoreFileInfo fileInfo) { + return constructKeyNamespace( + fileInfo.isLink() ? fileInfo.getLink().getOriginPath() : fileInfo.getPath()); + } + + /** + * Construct a key namespace by deriving table name and family name from a store file path. + * @param path The path + * @return The constructed key namespace + */ + public static String constructKeyNamespace(Path path) { + return constructKeyNamespace(path.getParent().getParent().getParent().getName(), + path.getParent().getName()); + } + + /** + * Construct a key namespace from a table name and family name. 
+ * @param tableName The table name + * @param family The family name + * @return The constructed key namespace + */ + public static String constructKeyNamespace(String tableName, String family) { + // Add preconditions for null checks + Preconditions.checkNotNull(tableName, "tableName should not be null"); + Preconditions.checkNotNull(family, "family should not be null"); + return tableName + "/" + family; + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/KeymetaServiceEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/KeymetaServiceEndpoint.java index 07b73376fa5f..4eb19a602cc0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/KeymetaServiceEndpoint.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/KeymetaServiceEndpoint.java @@ -137,8 +137,7 @@ public void getManagedKeys(RpcController controller, ManagedKeysRequest request, @InterfaceAudience.Private public static ManagedKeysResponse.Builder getResponseBuilder(RpcController controller, ManagedKeysRequest request) { - ManagedKeysResponse.Builder builder = - ManagedKeysResponse.newBuilder().setKeyNamespace(request.getKeyNamespace()); + ManagedKeysResponse.Builder builder = ManagedKeysResponse.newBuilder(); byte[] key_cust = convertToKeyCustBytes(controller, request, builder); if (key_cust != null) { builder.setKeyCustBytes(ByteString.copyFrom(key_cust)); @@ -152,9 +151,10 @@ public static GetManagedKeysResponse generateKeyStateResponse( List managedKeyStates, ManagedKeysResponse.Builder builder) { GetManagedKeysResponse.Builder responseBuilder = GetManagedKeysResponse.newBuilder(); for (ManagedKeyData keyData : managedKeyStates) { - builder.setKeyState(ManagedKeysProtos.ManagedKeyState.valueOf(keyData.getKeyState().getVal())) - .setKeyMetadata(keyData.getKeyMetadata()) - .setRefreshTimestamp(keyData.getRefreshTimestamp()); + builder + .setKeyState(ManagedKeysProtos.ManagedKeyState.forNumber(keyData.getKeyState().getVal())) + .setKeyMetadata(keyData.getKeyMetadata()).setRefreshTimestamp(keyData.getRefreshTimestamp()) + .setKeyNamespace(keyData.getKeyNamespace()); responseBuilder.addState(builder.build()); } return responseBuilder.build(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/KeymetaTableAccessor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/KeymetaTableAccessor.java index 6862e35ddf10..8e2a7095cfca 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/KeymetaTableAccessor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/KeymetaTableAccessor.java @@ -21,7 +21,7 @@ import java.security.Key; import java.security.KeyException; import java.util.ArrayList; -import java.util.HashSet; +import java.util.LinkedHashSet; import java.util.List; import java.util.Set; import org.apache.hadoop.hbase.HBaseInterfaceAudience; @@ -76,8 +76,15 @@ public class KeymetaTableAccessor extends KeyManagementBase { public static final String KEY_STATE_QUAL_NAME = "k"; public static final byte[] KEY_STATE_QUAL_BYTES = Bytes.toBytes(KEY_STATE_QUAL_NAME); + private Server server; + public KeymetaTableAccessor(Server server) { - super(server); + super(server.getKeyManagementService()); + this.server = server; + } + + public Server getServer() { + return server; } /** @@ -113,8 +120,7 @@ public List getAllKeys(byte[] key_cust, String keyNamespace) throws IOException, KeyException { assertKeyManagementEnabled(); Connection connection = getServer().getConnection(); - byte[] prefixForScan =
Bytes.add(Bytes.toBytes(key_cust.length), key_cust, Bytes.toBytes(keyNamespace)); + byte[] prefixForScan = constructRowKeyForCustNamespace(key_cust, keyNamespace); PrefixFilter prefixFilter = new PrefixFilter(prefixForScan); Scan scan = new Scan(); scan.setFilter(prefixFilter); @@ -122,9 +128,10 @@ public List getAllKeys(byte[] key_cust, String keyNamespace) try (Table table = connection.getTable(KEY_META_TABLE_NAME)) { ResultScanner scanner = table.getScanner(scan); - Set allKeys = new HashSet<>(); + Set allKeys = new LinkedHashSet<>(); for (Result result : scanner) { - ManagedKeyData keyData = parseFromResult(getServer(), key_cust, keyNamespace, result); + ManagedKeyData keyData = + parseFromResult(getKeyManagementService(), key_cust, keyNamespace, result); if (keyData != null) { allKeys.add(keyData); } @@ -147,11 +154,10 @@ public ManagedKeyData getActiveKey(byte[] key_cust, String keyNamespace) Connection connection = getServer().getConnection(); byte[] rowkeyForGet = constructRowKeyForCustNamespace(key_cust, keyNamespace); Get get = new Get(rowkeyForGet); - get.addColumn(KEY_META_INFO_FAMILY, KEY_STATE_QUAL_BYTES); try (Table table = connection.getTable(KEY_META_TABLE_NAME)) { Result result = table.get(get); - return parseFromResult(getServer(), key_cust, keyNamespace, result); + return parseFromResult(getKeyManagementService(), key_cust, keyNamespace, result); } } @@ -200,7 +206,7 @@ private ManagedKeyData getKeyInternal(byte[] key_cust, String keyNamespace, try (Table table = connection.getTable(KEY_META_TABLE_NAME)) { byte[] rowKey = constructRowKeyForMetadata(key_cust, keyNamespace, keyMetadataHash); Result result = table.get(new Get(rowKey)); - return parseFromResult(getServer(), key_cust, keyNamespace, result); + return parseFromResult(getKeyManagementService(), key_cust, keyNamespace, result); } } @@ -208,10 +214,11 @@ private ManagedKeyData getKeyInternal(byte[] key_cust, String keyNamespace, * Add the mutation columns to the given Put that are derived from the keyData. 
*/ private Put addMutationColumns(Put put, ManagedKeyData keyData) throws IOException { - ManagedKeyData latestSystemKey = getServer().getSystemKeyCache().getLatestSystemKey(); + ManagedKeyData latestSystemKey = + getKeyManagementService().getSystemKeyCache().getLatestSystemKey(); if (keyData.getTheKey() != null) { - byte[] dekWrappedBySTK = EncryptionUtil.wrapKey(getServer().getConfiguration(), null, - keyData.getTheKey(), latestSystemKey.getTheKey()); + byte[] dekWrappedBySTK = EncryptionUtil.wrapKey(getConfiguration(), null, keyData.getTheKey(), + latestSystemKey.getTheKey()); put .addColumn(KEY_META_INFO_FAMILY, DEK_CHECKSUM_QUAL_BYTES, Bytes.toBytes(keyData.getKeyChecksum())) @@ -261,12 +268,12 @@ public static byte[] constructRowKeyForCustNamespace(ManagedKeyData keyData) { @InterfaceAudience.Private public static byte[] constructRowKeyForCustNamespace(byte[] key_cust, String keyNamespace) { int custLength = key_cust.length; - return Bytes.add(Bytes.toBytes(custLength), key_cust, Bytes.toBytesBinary(keyNamespace)); + return Bytes.add(Bytes.toBytes(custLength), key_cust, Bytes.toBytes(keyNamespace)); } @InterfaceAudience.Private - public static ManagedKeyData parseFromResult(Server server, byte[] key_cust, String keyNamespace, - Result result) throws IOException, KeyException { + public static ManagedKeyData parseFromResult(KeyManagementService keyManagementService, + byte[] key_cust, String keyNamespace, Result result) throws IOException, KeyException { if (result == null || result.isEmpty()) { return null; } @@ -285,13 +292,14 @@ public static ManagedKeyData parseFromResult(Server server, byte[] key_cust, Str if (dekWrappedByStk != null) { long stkChecksum = Bytes.toLong(result.getValue(KEY_META_INFO_FAMILY, STK_CHECKSUM_QUAL_BYTES)); - ManagedKeyData clusterKey = server.getSystemKeyCache().getSystemKeyByChecksum(stkChecksum); + ManagedKeyData clusterKey = + keyManagementService.getSystemKeyCache().getSystemKeyByChecksum(stkChecksum); if (clusterKey == null) { LOG.error("Dropping key with metadata: {} as STK with checksum: {} is unavailable", dekMetadata, stkChecksum); return null; } - dek = EncryptionUtil.unwrapKey(server.getConfiguration(), null, dekWrappedByStk, + dek = EncryptionUtil.unwrapKey(keyManagementService.getConfiguration(), null, dekWrappedByStk, clusterKey.getTheKey()); } long refreshedTimestamp = diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/SystemKeyAccessor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/SystemKeyAccessor.java index ecac8e1a2857..8de01319e25b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/SystemKeyAccessor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/SystemKeyAccessor.java @@ -25,6 +25,7 @@ import java.util.List; import java.util.Map; import java.util.TreeMap; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; @@ -35,14 +36,24 @@ import org.apache.hadoop.hbase.util.CommonFSUtils; import org.apache.hadoop.hbase.util.Pair; import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; @InterfaceAudience.Private public class SystemKeyAccessor extends KeyManagementBase { + private static final Logger LOG = LoggerFactory.getLogger(SystemKeyAccessor.class); + + private final FileSystem fs; protected final Path systemKeyDir; public SystemKeyAccessor(Server server) throws IOException { - 
super(server); - this.systemKeyDir = CommonFSUtils.getSystemKeyDir(server.getConfiguration()); + this(server.getConfiguration(), server.getFileSystem()); + } + + public SystemKeyAccessor(Configuration configuration, FileSystem fs) throws IOException { + super(configuration); + this.systemKeyDir = CommonFSUtils.getSystemKeyDir(configuration); + this.fs = fs; } /** @@ -52,9 +63,7 @@ public SystemKeyAccessor(Server server) throws IOException { * is initialized yet. */ public Pair> getLatestSystemKeyFile() throws IOException { - if (!isKeyManagementEnabled()) { - return new Pair<>(null, null); - } + assertKeyManagementEnabled(); List allClusterKeyFiles = getAllSystemKeyFiles(); if (allClusterKeyFiles.isEmpty()) { throw new RuntimeException("No cluster key initialized yet"); @@ -72,17 +81,15 @@ public Pair> getLatestSystemKeyFile() throws IOException { * @throws IOException if there is an error getting the cluster key files */ public List getAllSystemKeyFiles() throws IOException { - if (!isKeyManagementEnabled()) { - return null; - } - FileSystem fs = getServer().getFileSystem(); + assertKeyManagementEnabled(); + LOG.info("Getting all system key files from: {} matching prefix: {}", systemKeyDir, + SYSTEM_KEY_FILE_PREFIX + "*"); Map clusterKeys = new TreeMap<>(Comparator.reverseOrder()); for (FileStatus st : fs.globStatus(new Path(systemKeyDir, SYSTEM_KEY_FILE_PREFIX + "*"))) { Path keyPath = st.getPath(); int seqNum = extractSystemKeySeqNum(keyPath); clusterKeys.put(seqNum, keyPath); } - return new ArrayList<>(clusterKeys.values()); } @@ -130,7 +137,7 @@ public static int extractKeySequence(Path clusterKeyFile) throws IOException { } protected String loadKeyMetadata(Path keyPath) throws IOException { - try (FSDataInputStream fin = getServer().getFileSystem().open(keyPath)) { + try (FSDataInputStream fin = fs.open(keyPath)) { return fin.readUTF(); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/SystemKeyCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/SystemKeyCache.java index bb7a6e3f6935..bcdf2ae11cf0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/SystemKeyCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/SystemKeyCache.java @@ -21,6 +21,8 @@ import java.util.List; import java.util.Map; import java.util.TreeMap; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.io.crypto.ManagedKeyData; import org.apache.yetus.audience.InterfaceAudience; @@ -35,6 +37,19 @@ public class SystemKeyCache { private final ManagedKeyData latestSystemKey; private final Map systemKeys; + /** + * Create a SystemKeyCache from the specified configuration and file system. + * @param configuration the configuration to use + * @param fs the file system to use + * @return the cache or {@code null} if no keys are found. + * @throws IOException if there is an error loading the system keys + */ + public static SystemKeyCache createCache(Configuration configuration, FileSystem fs) + throws IOException { + SystemKeyAccessor accessor = new SystemKeyAccessor(configuration, fs); + return createCache(accessor); + } + /** * Construct the System Key cache from the specified accessor. 
* @param accessor the accessor to use to load the system keys diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java index 0573b1a75628..dee9b48f9ea5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java @@ -33,6 +33,7 @@ import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.executor.ExecutorService; import org.apache.hadoop.hbase.favored.FavoredNodesManager; +import org.apache.hadoop.hbase.keymeta.KeyManagementService; import org.apache.hadoop.hbase.master.assignment.AssignmentManager; import org.apache.hadoop.hbase.master.hbck.HbckChore; import org.apache.hadoop.hbase.master.janitor.CatalogJanitor; @@ -67,7 +68,7 @@ * adding API. Changes cause ripples through the code base. */ @InterfaceAudience.Private -public interface MasterServices extends Server { +public interface MasterServices extends Server, KeyManagementService { /** Returns the underlying snapshot manager */ SnapshotManager getSnapshotManager(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SystemKeyManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SystemKeyManager.java index 2ca423bad8e9..de0e37dde275 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SystemKeyManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SystemKeyManager.java @@ -30,9 +30,13 @@ import org.apache.hadoop.hbase.keymeta.SystemKeyAccessor; import org.apache.hadoop.hbase.util.Pair; import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; @InterfaceAudience.Private public class SystemKeyManager extends SystemKeyAccessor { + private static final Logger LOG = LoggerFactory.getLogger(SystemKeyManager.class); + private final MasterServices master; public SystemKeyManager(MasterServices master) throws IOException { @@ -63,7 +67,7 @@ public ManagedKeyData rotateSystemKeyIfChanged() throws IOException { return null; } Pair> latestFileResult = getLatestSystemKeyFile(); - Path latestFile = getLatestSystemKeyFile().getFirst(); + Path latestFile = latestFileResult.getFirst(); String latestKeyMetadata = loadKeyMetadata(latestFile); return rotateSystemKey(latestKeyMetadata, latestFileResult.getSecond()); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java index 423297f667d3..59a8285b2f65 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java @@ -360,8 +360,7 @@ public List createHdfsRegions(final MasterProcedureEnv env, throws IOException { RegionInfo[] regions = newRegions != null ? 
newRegions.toArray(new RegionInfo[newRegions.size()]) : null; - return ModifyRegionUtils.createRegions(env.getMasterConfiguration(), tableRootDir, - tableDescriptor, regions, null); + return ModifyRegionUtils.createRegions(env, tableRootDir, tableDescriptor, regions, null); } }); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/InitMetaProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/InitMetaProcedure.java index 8b4901e90e85..2d54eaf6c58c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/InitMetaProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/InitMetaProcedure.java @@ -75,10 +75,10 @@ public TableOperationType getTableOperationType() { return TableOperationType.CREATE; } - private static TableDescriptor writeFsLayout(Path rootDir, Configuration conf) + private static TableDescriptor writeFsLayout(Path rootDir, MasterProcedureEnv env) throws IOException { LOG.info("BOOTSTRAP: creating hbase:meta region"); - FileSystem fs = rootDir.getFileSystem(conf); + FileSystem fs = rootDir.getFileSystem(env.getMasterConfiguration()); Path tableDir = CommonFSUtils.getTableDir(rootDir, TableName.META_TABLE_NAME); if (fs.exists(tableDir) && !deleteMetaTableDirectoryIfPartial(fs, tableDir)) { LOG.warn("Can not delete partial created meta table, continue..."); @@ -87,10 +87,11 @@ private static TableDescriptor writeFsLayout(Path rootDir, Configuration conf) // created here in bootstrap and it'll need to be cleaned up. Better to // not make it in first place. Turn off block caching for bootstrap. // Enable after. - TableDescriptor metaDescriptor = - FSTableDescriptors.tryUpdateAndGetMetaTableDescriptor(conf, fs, rootDir); + TableDescriptor metaDescriptor = FSTableDescriptors + .tryUpdateAndGetMetaTableDescriptor(env.getMasterConfiguration(), fs, rootDir); HRegion - .createHRegion(RegionInfoBuilder.FIRST_META_REGIONINFO, rootDir, conf, metaDescriptor, null) + .createHRegion(RegionInfoBuilder.FIRST_META_REGIONINFO, rootDir, env.getMasterConfiguration(), + metaDescriptor, null, env.getMasterServices().getKeyManagementService()) .close(); return metaDescriptor; } @@ -104,7 +105,7 @@ protected Flow executeFromState(MasterProcedureEnv env, InitMetaState state) case INIT_META_WRITE_FS_LAYOUT: Configuration conf = env.getMasterConfiguration(); Path rootDir = CommonFSUtils.getRootDir(conf); - TableDescriptor td = writeFsLayout(rootDir, conf); + TableDescriptor td = writeFsLayout(rootDir, env); env.getMasterServices().getTableDescriptors().update(td, true); setNextState(InitMetaState.INIT_META_ASSIGN_META); return Flow.HAS_MORE_STATE; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/region/MasterRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/region/MasterRegion.java index 97447e37b7c4..0539fb6250a8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/region/MasterRegion.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/region/MasterRegion.java @@ -29,7 +29,6 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseIOException; import org.apache.hadoop.hbase.RegionTooBusyException; -import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; import org.apache.hadoop.hbase.client.ConnectionUtils; @@ -44,6 +43,7 @@ import org.apache.hadoop.hbase.ipc.RpcCall; import org.apache.hadoop.hbase.ipc.RpcServer; 
import org.apache.hadoop.hbase.log.HBaseMarkers; +import org.apache.hadoop.hbase.master.MasterServices; import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.HRegion.FlushResult; import org.apache.hadoop.hbase.regionserver.HRegionFileSystem; @@ -114,7 +114,7 @@ public final class MasterRegion { private static final int REGION_ID = 1; - private final Server server; + private final MasterServices server; private final WALFactory walFactory; @@ -128,7 +128,7 @@ public final class MasterRegion { private final long regionUpdateRetryPauseTime; - private MasterRegion(Server server, HRegion region, WALFactory walFactory, + private MasterRegion(MasterServices server, HRegion region, WALFactory walFactory, MasterRegionFlusherAndCompactor flusherAndCompactor, MasterRegionWALRoller walRoller) { this.server = server; this.region = region; @@ -301,14 +301,15 @@ private static WAL createWAL(WALFactory walFactory, MasterRegionWALRoller walRol private static HRegion bootstrap(Configuration conf, TableDescriptor td, FileSystem fs, Path rootDir, FileSystem walFs, Path walRootDir, WALFactory walFactory, - MasterRegionWALRoller walRoller, String serverName, boolean touchInitializingFlag) + MasterRegionWALRoller walRoller, MasterServices server, boolean touchInitializingFlag) throws IOException { TableName tn = td.getTableName(); RegionInfo regionInfo = RegionInfoBuilder.newBuilder(tn).setRegionId(REGION_ID).build(); Path tableDir = CommonFSUtils.getTableDir(rootDir, tn); // persist table descriptor FSTableDescriptors.createTableDescriptorForTableDirectory(fs, tableDir, td, true); - HRegion.createHRegion(conf, regionInfo, fs, tableDir, td).close(); + HRegion.createHRegion(conf, regionInfo, fs, tableDir, td, server.getKeyManagementService()) + .close(); Path initializedFlag = new Path(tableDir, INITIALIZED_FLAG); if (!fs.mkdirs(initializedFlag)) { throw new IOException("Can not touch initialized flag: " + initializedFlag); @@ -317,8 +318,10 @@ private static HRegion bootstrap(Configuration conf, TableDescriptor td, FileSys if (!fs.delete(initializingFlag, true)) { LOG.warn("failed to clean up initializing flag: " + initializingFlag); } - WAL wal = createWAL(walFactory, walRoller, serverName, walFs, walRootDir, regionInfo); - return HRegion.openHRegionFromTableDir(conf, fs, tableDir, regionInfo, td, wal, null, null); + WAL wal = createWAL(walFactory, walRoller, server.getServerName().toString(), walFs, walRootDir, + regionInfo); + return HRegion.openHRegionFromTableDir(conf, fs, tableDir, regionInfo, td, wal, null, null, + server.getKeyManagementService()); } private static RegionInfo loadRegionInfo(FileSystem fs, Path tableDir) throws IOException { @@ -330,7 +333,7 @@ private static RegionInfo loadRegionInfo(FileSystem fs, Path tableDir) throws IO private static HRegion open(Configuration conf, TableDescriptor td, RegionInfo regionInfo, FileSystem fs, Path rootDir, FileSystem walFs, Path walRootDir, WALFactory walFactory, - MasterRegionWALRoller walRoller, String serverName) throws IOException { + MasterRegionWALRoller walRoller, MasterServices server) throws IOException { Path tableDir = CommonFSUtils.getTableDir(rootDir, td.getTableName()); Path walRegionDir = FSUtils.getRegionDirFromRootDir(walRootDir, regionInfo); Path replayEditsDir = new Path(walRegionDir, REPLAY_EDITS_DIR); @@ -346,7 +349,8 @@ private static HRegion open(Configuration conf, TableDescriptor td, RegionInfo r // to always exist in normal situations, but we should guard against users changing the // 
filesystem outside of HBase's line of sight. if (walFs.exists(walsDir)) { - replayWALs(conf, walFs, walRootDir, walsDir, regionInfo, serverName, replayEditsDir); + replayWALs(conf, walFs, walRootDir, walsDir, regionInfo, server.getServerName().toString(), + replayEditsDir); } else { LOG.error( "UNEXPECTED: WAL directory for MasterRegion is missing." + " {} is unexpectedly missing.", @@ -354,13 +358,15 @@ private static HRegion open(Configuration conf, TableDescriptor td, RegionInfo r } // Create a new WAL - WAL wal = createWAL(walFactory, walRoller, serverName, walFs, walRootDir, regionInfo); + WAL wal = createWAL(walFactory, walRoller, server.getServerName().toString(), walFs, walRootDir, + regionInfo); conf.set(HRegion.SPECIAL_RECOVERED_EDITS_DIR, replayEditsDir.makeQualified(walFs.getUri(), walFs.getWorkingDirectory()).toString()); // we do not do WAL splitting here so it is possible to have uncleanly closed WAL files, so we // need to ignore EOFException. conf.setBoolean(HRegion.RECOVERED_EDITS_IGNORE_EOF, true); - return HRegion.openHRegionFromTableDir(conf, fs, tableDir, regionInfo, td, wal, null, null); + return HRegion.openHRegionFromTableDir(conf, fs, tableDir, regionInfo, td, wal, null, null, + server); } private static void replayWALs(Configuration conf, FileSystem walFs, Path walRootDir, @@ -437,7 +443,7 @@ private static void tryMigrate(Configuration conf, FileSystem fs, Path tableDir, public static MasterRegion create(MasterRegionParams params) throws IOException { TableDescriptor td = params.tableDescriptor(); LOG.info("Create or load local region for table " + td); - Server server = params.server(); + MasterServices server = params.server(); Configuration baseConf = server.getConfiguration(); FileSystem fs = CommonFSUtils.getRootDirFileSystem(baseConf); FileSystem walFs = CommonFSUtils.getWALFileSystem(baseConf); @@ -476,8 +482,8 @@ public static MasterRegion create(MasterRegionParams params) throws IOException if (!fs.mkdirs(initializedFlag)) { throw new IOException("Can not touch initialized flag"); } - region = bootstrap(conf, td, fs, rootDir, walFs, walRootDir, walFactory, walRoller, - server.getServerName().toString(), true); + region = + bootstrap(conf, td, fs, rootDir, walFs, walRootDir, walFactory, walRoller, server, true); } else { if (!fs.exists(initializedFlag)) { if (!fs.exists(initializingFlag)) { @@ -495,7 +501,7 @@ public static MasterRegion create(MasterRegionParams params) throws IOException RegionInfo regionInfo = loadRegionInfo(fs, tableDir); tryMigrate(conf, fs, tableDir, regionInfo, oldTd, td); region = open(conf, td, regionInfo, fs, rootDir, walFs, walRootDir, walFactory, walRoller, - server.getServerName().toString()); + server); } else { // delete all contents besides the initializing flag, here we can make sure tableDir // exists(unless someone delete it manually...), so we do not do null check here. 
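With MasterRegionParams now carrying a MasterServices rather than a plain Server, every bootstrap/open path above can hand the master's KeyManagementService down to HRegion. A process without a live master (offline tooling, standalone tests) could instead fall back to the default service; a minimal sketch under that assumption, using only the createDefault factory introduced in this change plus HBaseConfiguration and CommonFSUtils:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.keymeta.KeyManagementService;
import org.apache.hadoop.hbase.util.CommonFSUtils;

public class DefaultKeyManagementServiceSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    // Resolve the same root filesystem the master region uses for hbase.rootdir.
    FileSystem rootFs = CommonFSUtils.getRootDirFileSystem(conf);
    // DefaultKeyManagementService loads the SystemKeyCache eagerly and rethrows any
    // IOException as a RuntimeException, so a bad system key directory fails fast.
    KeyManagementService kms = KeyManagementService.createDefault(conf, rootFs);
    System.out.println("system key cache loaded: " + (kms.getSystemKeyCache() != null));
  }
}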
@@ -505,7 +511,7 @@ public static MasterRegion create(MasterRegionParams params) throws IOException } } region = bootstrap(conf, td, fs, rootDir, walFs, walRootDir, walFactory, walRoller, - server.getServerName().toString(), false); + server, false); } } else { if (fs.exists(initializingFlag) && !fs.delete(initializingFlag, true)) { @@ -515,8 +521,8 @@ public static MasterRegion create(MasterRegionParams params) throws IOException TableDescriptor oldTd = FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir); RegionInfo regionInfo = loadRegionInfo(fs, tableDir); tryMigrate(conf, fs, tableDir, regionInfo, oldTd, td); - region = open(conf, td, regionInfo, fs, rootDir, walFs, walRootDir, walFactory, walRoller, - server.getServerName().toString()); + region = + open(conf, td, regionInfo, fs, rootDir, walFs, walRootDir, walFactory, walRoller, server); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/region/MasterRegionFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/region/MasterRegionFactory.java index 71fb76bd0f1b..878f8dc17a1d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/region/MasterRegionFactory.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/region/MasterRegionFactory.java @@ -21,12 +21,12 @@ import java.util.concurrent.TimeUnit; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.client.TableDescriptorBuilder; import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; +import org.apache.hadoop.hbase.master.MasterServices; import org.apache.hadoop.hbase.regionserver.BloomType; import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTracker; import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory; @@ -113,7 +113,7 @@ private static TableDescriptor withTrackerConfigs(Configuration conf) { return tracker.updateWithTrackerConfigs(TableDescriptorBuilder.newBuilder(TABLE_DESC)).build(); } - public static MasterRegion create(Server server) throws IOException { + public static MasterRegion create(MasterServices server) throws IOException { Configuration conf = server.getConfiguration(); MasterRegionParams params = new MasterRegionParams().server(server) .regionDirName(MASTER_STORE_DIR).tableDescriptor(withTrackerConfigs(conf)); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/region/MasterRegionParams.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/region/MasterRegionParams.java index b9065747b669..443bca9f8c97 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/region/MasterRegionParams.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/region/MasterRegionParams.java @@ -17,8 +17,8 @@ */ package org.apache.hadoop.hbase.master.region; -import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.client.TableDescriptor; +import org.apache.hadoop.hbase.master.MasterServices; import org.apache.yetus.audience.InterfaceAudience; /** @@ -27,7 +27,7 @@ @InterfaceAudience.Private public class MasterRegionParams { - private Server server; + private MasterServices server; private String regionDirName; @@ -55,7 +55,7 @@ public class MasterRegionParams { private Boolean useMetaCellComparator; - public 
MasterRegionParams server(Server server) { + public MasterRegionParams server(MasterServices server) { this.server = server; return this; } @@ -125,7 +125,7 @@ public MasterRegionParams useMetaCellComparator(boolean useMetaCellComparator) { return this; } - public Server server() { + public MasterServices server() { return server; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java index 7936197ff8d8..99aca4f6abde 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java @@ -90,6 +90,7 @@ import org.apache.hadoop.hbase.DroppedSnapshotException; import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.ExtendedCellBuilderFactory; +import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HConstants.OperationStatusCode; import org.apache.hadoop.hbase.HDFSBlocksDistribution; @@ -146,6 +147,9 @@ import org.apache.hadoop.hbase.ipc.RpcCall; import org.apache.hadoop.hbase.ipc.RpcServer; import org.apache.hadoop.hbase.ipc.ServerCall; +import org.apache.hadoop.hbase.keymeta.KeyManagementService; +import org.apache.hadoop.hbase.keymeta.ManagedKeyDataCache; +import org.apache.hadoop.hbase.keymeta.SystemKeyCache; import org.apache.hadoop.hbase.mob.MobFileCache; import org.apache.hadoop.hbase.monitoring.MonitoredTask; import org.apache.hadoop.hbase.monitoring.TaskMonitor; @@ -166,6 +170,7 @@ import org.apache.hadoop.hbase.regionserver.wal.WALUtil; import org.apache.hadoop.hbase.replication.ReplicationUtils; import org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver; +import org.apache.hadoop.hbase.security.SecurityUtil; import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils; import org.apache.hadoop.hbase.snapshot.SnapshotManifest; @@ -382,6 +387,8 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi private final Configuration baseConf; private final int rowLockWaitDuration; static final int DEFAULT_ROWLOCK_WAIT_DURATION = 30000; + private final ManagedKeyDataCache managedKeyDataCache; + private final SystemKeyCache systemKeyCache; private Path regionWalDir; private FileSystem walFS; @@ -769,8 +776,36 @@ void sawNoSuchFamily() { public HRegion(final Path tableDir, final WAL wal, final FileSystem fs, final Configuration confParam, final RegionInfo regionInfo, final TableDescriptor htd, final RegionServerServices rsServices) { + this(tableDir, wal, fs, confParam, regionInfo, htd, rsServices, null); + } + + /** + * HRegion constructor. This constructor should only be used for testing and extensions. Instances + * of HRegion should be instantiated with the {@link HRegion#createHRegion} or + * {@link HRegion#openHRegion} method. + * @param tableDir qualified path of directory where region should be located, usually + * the table directory. + * @param wal The WAL is the outbound log for any updates to the HRegion The wal + * file is a logfile from the previous execution that's + * custom-computed for this HRegion. The HRegionServer computes and + * sorts the appropriate wal info for this HRegion. If there is a + * previous wal file (implying that the HRegion has been written-to + * before), then read it from the supplied path. + * @param fs is the filesystem. 
+ * @param confParam is global configuration settings. + * @param regionInfo RegionInfo that describes the region + * @param htd the table descriptor + * @param rsServices reference to {@link RegionServerServices} or null + * @param keyManagementService reference to {@link KeyManagementService} or null + * @deprecated Use other constructors. + */ + @Deprecated + public HRegion(final Path tableDir, final WAL wal, final FileSystem fs, + final Configuration confParam, final RegionInfo regionInfo, final TableDescriptor htd, + final RegionServerServices rsServices, final KeyManagementService keyManagementService) { this(new HRegionFileSystem(confParam, fs, tableDir, regionInfo), wal, confParam, htd, - rsServices); + rsServices, keyManagementService); } /** @@ -789,6 +824,28 @@ public HRegion(final Path tableDir, final WAL wal, final FileSystem fs, */ public HRegion(final HRegionFileSystem fs, final WAL wal, final Configuration confParam, final TableDescriptor htd, final RegionServerServices rsServices) { + this(fs, wal, confParam, htd, rsServices, null); + } + + /** + * HRegion constructor. This constructor should only be used for testing and extensions. Instances + * of HRegion should be instantiated with the {@link HRegion#createHRegion} or + * {@link HRegion#openHRegion} method. + * @param fs is the filesystem. + * @param wal The WAL is the outbound log for any updates to the HRegion The wal + * file is a logfile from the previous execution that's + * custom-computed for this HRegion. The HRegionServer computes and + * sorts the appropriate wal info for this HRegion. If there is a + * previous wal file (implying that the HRegion has been written-to + * before), then read it from the supplied path. + * @param confParam is global configuration settings. + * @param htd the table descriptor + * @param rsServices reference to {@link RegionServerServices} or null + * @param keyManagementService reference to {@link KeyManagementService} or null + */ + public HRegion(final HRegionFileSystem fs, final WAL wal, final Configuration confParam, + final TableDescriptor htd, final RegionServerServices rsServices, + KeyManagementService keyManagementService) { if (htd == null) { throw new IllegalArgumentException("Need table descriptor"); } @@ -929,6 +986,17 @@ public HRegion(final HRegionFileSystem fs, final WAL wal, final Configuration co minBlockSizeBytes = Arrays.stream(this.htableDescriptor.getColumnFamilies()) .mapToInt(ColumnFamilyDescriptor::getBlocksize).min().orElse(HConstants.DEFAULT_BLOCKSIZE); + + if (SecurityUtil.isKeyManagementEnabled(conf)) { + if (keyManagementService == null) { + keyManagementService = KeyManagementService.createDefault(conf, fs.getFileSystem()); + } + this.managedKeyDataCache = keyManagementService.getManagedKeyDataCache(); + this.systemKeyCache = keyManagementService.getSystemKeyCache(); + } else { + this.managedKeyDataCache = null; + this.systemKeyCache = null; + } } private void setHTableSpecificConf() { @@ -2122,6 +2190,14 @@ public BlockCache getBlockCache() { return this.blockCache; } + public ManagedKeyDataCache getManagedKeyDataCache() { + return this.managedKeyDataCache; + } + + public SystemKeyCache getSystemKeyCache() { + return this.systemKeyCache; + } + /** * Only used for unit test which doesn't start region server.
*/ @@ -7579,37 +7655,60 @@ public String toString() { } // Utility methods + @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.UNITTEST) + public static HRegion newHRegion(Path tableDir, WAL wal, FileSystem fs, Configuration conf, + RegionInfo regionInfo, final TableDescriptor htd, RegionServerServices rsServices) { + return newHRegion(tableDir, wal, fs, conf, regionInfo, htd, rsServices, null); + } + /** * A utility method to create new instances of HRegion based on the {@link HConstants#REGION_IMPL} * configuration property. - * @param tableDir qualified path of directory where region should be located, usually the table - * directory. - * @param wal The WAL is the outbound log for any updates to the HRegion The wal file is a - * logfile from the previous execution that's custom-computed for this HRegion. - * The HRegionServer computes and sorts the appropriate wal info for this - * HRegion. If there is a previous file (implying that the HRegion has been - * written-to before), then read it from the supplied path. - * @param fs is the filesystem. - * @param conf is global configuration settings. - * @param regionInfo - RegionInfo that describes the region is new), then read them from the - * supplied path. - * @param htd the table descriptor + * @param tableDir qualified path of directory where region should be located, usually + * the table directory. + * @param wal The WAL is the outbound log for any updates to the HRegion The wal + * file is a logfile from the previous execution that's + * custom-computed for this HRegion. The HRegionServer computes and + * sorts the appropriate wal info for this HRegion. If there is a + * previous file (implying that the HRegion has been written-to + * before), then read it from the supplied path. + * @param fs is the filesystem. + * @param conf is global configuration settings. + * @param regionInfo RegionInfo that describes the region + * @param htd the table descriptor + * @param keyManagementService reference to {@link KeyManagementService} or null * @return the new instance */ public static HRegion newHRegion(Path tableDir, WAL wal, FileSystem fs, Configuration conf, - RegionInfo regionInfo, final TableDescriptor htd, RegionServerServices rsServices) { + RegionInfo regionInfo, final TableDescriptor htd, RegionServerServices rsServices, + final KeyManagementService keyManagementService) { + List> ctorArgTypes = + Arrays.asList(Path.class, WAL.class, FileSystem.class, Configuration.class, RegionInfo.class, + TableDescriptor.class, RegionServerServices.class, KeyManagementService.class); + List ctorArgs = + Arrays.asList(tableDir, wal, fs, conf, regionInfo, htd, rsServices, keyManagementService); + + try { + return createInstance(conf, ctorArgTypes, ctorArgs); + } catch (Throwable e) { + // Try the old signature for the sake of test code.
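+ // A custom HConstants.REGION_IMPL may only declare the legacy seven-argument
+ // constructor, so retry with the trailing KeyManagementService type and argument
+ // stripped; such regions are simply constructed without a key management service.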
+ return createInstance(conf, ctorArgTypes.subList(0, ctorArgTypes.size() - 1), + ctorArgs.subList(0, ctorArgs.size() - 1)); + } + } + + private static HRegion createInstance(Configuration conf, List> ctorArgTypes, + List ctorArgs) { try { @SuppressWarnings("unchecked") Class regionClass = (Class) conf.getClass(HConstants.REGION_IMPL, HRegion.class); Constructor c = - regionClass.getConstructor(Path.class, WAL.class, FileSystem.class, Configuration.class, - RegionInfo.class, TableDescriptor.class, RegionServerServices.class); - - return c.newInstance(tableDir, wal, fs, conf, regionInfo, htd, rsServices); + regionClass.getConstructor(ctorArgTypes.toArray(new Class[ctorArgTypes.size()])); + return c.newInstance(ctorArgs.toArray(new Object[ctorArgs.size()])); } catch (Throwable e) { - // todo: what should I throw here? throw new IllegalStateException("Could not instantiate a region instance.", e); } } @@ -7622,6 +7721,7 @@ public static HRegion newHRegion(Path tableDir, WAL wal, FileSystem fs, Configur * @param initialize - true to initialize the region * @return new HRegion */ + @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.UNITTEST) public static HRegion createHRegion(final RegionInfo info, final Path rootDir, final Configuration conf, final TableDescriptor hTableDescriptor, final WAL wal, final boolean initialize) throws IOException { @@ -7637,16 +7737,35 @@ public static HRegion createHRegion(final RegionInfo info, final Path rootDir, * @param rsRpcServices An interface we can request flushes against. * @return new HRegion */ + @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.UNITTEST) public static HRegion createHRegion(final RegionInfo info, final Path rootDir, final Configuration conf, final TableDescriptor hTableDescriptor, final WAL wal, final boolean initialize, RegionServerServices rsRpcServices) throws IOException { + return createHRegion(info, rootDir, conf, hTableDescriptor, wal, initialize, rsRpcServices, + null); + } + + /** + * Convenience method creating new HRegions. Used by createTable. + * @param info Info for region to create. + * @param rootDir Root directory for HBase instance + * @param wal shared WAL + * @param initialize - true to initialize the region + * @param rsRpcServices An interface we can request flushes against. + * @param keyManagementService reference to {@link KeyManagementService} or null + * @return new HRegion + */ + public static HRegion createHRegion(final RegionInfo info, final Path rootDir, + final Configuration conf, final TableDescriptor hTableDescriptor, final WAL wal, + final boolean initialize, RegionServerServices rsRpcServices, + final KeyManagementService keyManagementService) throws IOException { LOG.info("creating " + info + ", tableDescriptor=" + (hTableDescriptor == null ? "null" : hTableDescriptor) + ", regionDir=" + rootDir); createRegionDir(conf, info, rootDir); FileSystem fs = rootDir.getFileSystem(conf); Path tableDir = CommonFSUtils.getTableDir(rootDir, info.getTable()); - HRegion region = - HRegion.newHRegion(tableDir, wal, fs, conf, info, hTableDescriptor, rsRpcServices); + HRegion region = HRegion.newHRegion(tableDir, wal, fs, conf, info, hTableDescriptor, + rsRpcServices, keyManagementService); if (initialize) { region.initialize(null); } @@ -7657,11 +7776,13 @@ public static HRegion createHRegion(final RegionInfo info, final Path rootDir, * Create a region under the given table directory. 
*/ public static HRegion createHRegion(Configuration conf, RegionInfo regionInfo, FileSystem fs, - Path tableDir, TableDescriptor tableDesc) throws IOException { + Path tableDir, TableDescriptor tableDesc, KeyManagementService keyManagementService) + throws IOException { LOG.info("Creating {}, tableDescriptor={}, under table dir {}", regionInfo, tableDesc, tableDir); HRegionFileSystem.createRegionOnFileSystem(conf, fs, tableDir, regionInfo); - HRegion region = HRegion.newHRegion(tableDir, null, fs, conf, regionInfo, tableDesc, null); + HRegion region = HRegion.newHRegion(tableDir, null, fs, conf, regionInfo, tableDesc, null, + keyManagementService); return region; } @@ -7680,7 +7801,14 @@ public static HRegionFileSystem createRegionDir(Configuration configuration, Reg public static HRegion createHRegion(final RegionInfo info, final Path rootDir, final Configuration conf, final TableDescriptor hTableDescriptor, final WAL wal) throws IOException { - return createHRegion(info, rootDir, conf, hTableDescriptor, wal, true); + return createHRegion(info, rootDir, conf, hTableDescriptor, wal, null); + } + + public static HRegion createHRegion(final RegionInfo info, final Path rootDir, + final Configuration conf, final TableDescriptor hTableDescriptor, final WAL wal, + final KeyManagementService keyManagementService) throws IOException { + return createHRegion(info, rootDir, conf, hTableDescriptor, wal, true, null, + keyManagementService); } /** @@ -7691,6 +7819,7 @@ public static HRegion createHRegion(final RegionInfo info, final Path rootDir, * properly kept up. HRegionStore does this every time it opens a new region. * @return new HRegion */ + @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.UNITTEST) public static HRegion openHRegion(final RegionInfo info, final TableDescriptor htd, final WAL wal, final Configuration conf) throws IOException { return openHRegion(info, htd, wal, conf, null, null); @@ -7712,7 +7841,8 @@ public static HRegion openHRegion(final RegionInfo info, final TableDescriptor h public static HRegion openHRegion(final RegionInfo info, final TableDescriptor htd, final WAL wal, final Configuration conf, final RegionServerServices rsServices, final CancelableProgressable reporter) throws IOException { - return openHRegion(CommonFSUtils.getRootDir(conf), info, htd, wal, conf, rsServices, reporter); + return openHRegion(CommonFSUtils.getRootDir(conf), info, htd, wal, conf, rsServices, reporter, + rsServices); } /** @@ -7726,9 +7856,10 @@ public static HRegion openHRegion(final RegionInfo info, final TableDescriptor h * @param conf The Configuration object to use. * @return new HRegion */ + @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.UNITTEST) public static HRegion openHRegion(Path rootDir, final RegionInfo info, final TableDescriptor htd, final WAL wal, final Configuration conf) throws IOException { - return openHRegion(rootDir, info, htd, wal, conf, null, null); + return openHRegion(rootDir, info, htd, wal, conf, null, null, null); } /** @@ -7745,10 +7876,33 @@ public static HRegion openHRegion(Path rootDir, final RegionInfo info, final Tab * @param reporter An interface we can report progress against. 
* @return new HRegion */ + @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.UNITTEST) public static HRegion openHRegion(final Path rootDir, final RegionInfo info, final TableDescriptor htd, final WAL wal, final Configuration conf, final RegionServerServices rsServices, final CancelableProgressable reporter) throws IOException { + return openHRegion(rootDir, info, htd, wal, conf, rsServices, reporter, null); + } + + /** + * Open a Region. + * @param rootDir Root directory for HBase instance + * @param info Info for region to be opened. + * @param htd the table descriptor + * @param wal WAL for region to use. This method will call + * WAL#setSequenceNumber(long) passing the result of the call to + * HRegion#getMinSequenceId() to ensure the wal id is properly kept + * up. HRegionStore does this every time it opens a new region. + * @param conf The Configuration object to use. + * @param rsServices An interface we can request flushes against. + * @param reporter An interface we can report progress against. + * @param keyManagementService reference to {@link KeyManagementService} or null + * @return new HRegion + */ + public static HRegion openHRegion(final Path rootDir, final RegionInfo info, + final TableDescriptor htd, final WAL wal, final Configuration conf, + final RegionServerServices rsServices, final CancelableProgressable reporter, + final KeyManagementService keyManagementService) throws IOException { FileSystem fs = null; if (rsServices != null) { fs = rsServices.getFileSystem(); @@ -7756,7 +7910,8 @@ public static HRegion openHRegion(final Path rootDir, final RegionInfo info, if (fs == null) { fs = rootDir.getFileSystem(conf); } - return openHRegion(conf, fs, rootDir, info, htd, wal, rsServices, reporter); + return openHRegion(conf, fs, rootDir, info, htd, wal, rsServices, reporter, + keyManagementService); } /** @@ -7771,57 +7926,70 @@ public static HRegion openHRegion(final Path rootDir, final RegionInfo info, * properly kept up. HRegionStore does this every time it opens a new region. * @return new HRegion */ + @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.UNITTEST) public static HRegion openHRegion(final Configuration conf, final FileSystem fs, final Path rootDir, final RegionInfo info, final TableDescriptor htd, final WAL wal) throws IOException { - return openHRegion(conf, fs, rootDir, info, htd, wal, null, null); + return openHRegion(conf, fs, rootDir, info, htd, wal, null, null, null); + } + + @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.UNITTEST) + public static HRegion openHRegion(final Configuration conf, final FileSystem fs, + final Path rootDir, final RegionInfo info, final TableDescriptor htd, final WAL wal, + final RegionServerServices rsServices, final CancelableProgressable reporter) + throws IOException { + return openHRegion(conf, fs, rootDir, info, htd, wal, rsServices, reporter, null); } /** * Open a Region. - * @param conf The Configuration object to use. - * @param fs Filesystem to use - * @param rootDir Root directory for HBase instance - * @param info Info for region to be opened. - * @param htd the table descriptor - * @param wal WAL for region to use. This method will call WAL#setSequenceNumber(long) - * passing the result of the call to HRegion#getMinSequenceId() to ensure the - * wal id is properly kept up. HRegionStore does this every time it opens a new - * region. - * @param rsServices An interface we can request flushes against. - * @param reporter An interface we can report progress against. 
+ * @param conf The Configuration object to use. + * @param fs Filesystem to use + * @param rootDir Root directory for HBase instance + * @param info Info for region to be opened. + * @param htd the table descriptor + * @param wal WAL for region to use. This method will call + * WAL#setSequenceNumber(long) passing the result of the call to + * HRegion#getMinSequenceId() to ensure the wal id is properly kept + * up. HRegionStore does this every time it opens a new region. + * @param rsServices An interface we can request flushes against. + * @param reporter An interface we can report progress against. + * @param keyManagementService reference to {@link KeyManagementService} or null * @return new HRegion */ public static HRegion openHRegion(final Configuration conf, final FileSystem fs, final Path rootDir, final RegionInfo info, final TableDescriptor htd, final WAL wal, - final RegionServerServices rsServices, final CancelableProgressable reporter) - throws IOException { + final RegionServerServices rsServices, final CancelableProgressable reporter, + final KeyManagementService keyManagementService) throws IOException { Path tableDir = CommonFSUtils.getTableDir(rootDir, info.getTable()); - return openHRegionFromTableDir(conf, fs, tableDir, info, htd, wal, rsServices, reporter); + return openHRegionFromTableDir(conf, fs, tableDir, info, htd, wal, rsServices, reporter, + keyManagementService); } /** * Open a Region. - * @param conf The Configuration object to use. - * @param fs Filesystem to use - * @param info Info for region to be opened. - * @param htd the table descriptor - * @param wal WAL for region to use. This method will call WAL#setSequenceNumber(long) - * passing the result of the call to HRegion#getMinSequenceId() to ensure the - * wal id is properly kept up. HRegionStore does this every time it opens a new - * region. - * @param rsServices An interface we can request flushes against. - * @param reporter An interface we can report progress against. + * @param conf The Configuration object to use. + * @param fs Filesystem to use + * @param info Info for region to be opened. + * @param htd the table descriptor + * @param wal WAL for region to use. This method will call + * WAL#setSequenceNumber(long) passing the result of the call to + * HRegion#getMinSequenceId() to ensure the wal id is properly kept + * up. HRegionStore does this every time it opens a new region. + * @param rsServices An interface we can request flushes against. + * @param reporter An interface we can report progress against. 
+ * @param keyManagementService reference to {@link KeyManagementService} or null * @return new HRegion * @throws NullPointerException if {@code info} is {@code null} */ public static HRegion openHRegionFromTableDir(final Configuration conf, final FileSystem fs, final Path tableDir, final RegionInfo info, final TableDescriptor htd, final WAL wal, - final RegionServerServices rsServices, final CancelableProgressable reporter) - throws IOException { + final RegionServerServices rsServices, final CancelableProgressable reporter, + final KeyManagementService keyManagementService) throws IOException { Objects.requireNonNull(info, "RegionInfo cannot be null"); LOG.debug("Opening region: {}", info); - HRegion r = HRegion.newHRegion(tableDir, wal, fs, conf, info, htd, rsServices); + HRegion r = + HRegion.newHRegion(tableDir, wal, fs, conf, info, htd, rsServices, keyManagementService); return r.openHRegion(reporter); } @@ -7835,19 +8003,15 @@ public NavigableMap getReplicationScope() { * @param reporter An interface we can report progress against. * @return new HRegion */ + @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.UNITTEST) public static HRegion openHRegion(final HRegion other, final CancelableProgressable reporter) throws IOException { HRegionFileSystem regionFs = other.getRegionFileSystem(); HRegion r = newHRegion(regionFs.getTableDir(), other.getWAL(), regionFs.getFileSystem(), - other.baseConf, other.getRegionInfo(), other.getTableDescriptor(), null); + other.baseConf, other.getRegionInfo(), other.getTableDescriptor(), null, null); return r.openHRegion(reporter); } - public static Region openHRegion(final Region other, final CancelableProgressable reporter) - throws IOException { - return openHRegion((HRegion) other, reporter); - } - /** * Open HRegion. *

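A minimal sketch, not part of the patch, of how a caller might use the widened openHRegion entry point above: the overload chain only threads an optional KeyManagementService through to the region constructor. The helper class and method names are hypothetical; passing null for rsServices, reporter, or the KeyManagementService mirrors the behaviour of the older overloads.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.keymeta.KeyManagementService;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.wal.WAL;

final class OpenRegionSketch {
  /** Opens a region, optionally wiring in key management. */
  static HRegion open(Configuration conf, Path rootDir, RegionInfo info, TableDescriptor htd,
    WAL wal, KeyManagementService kms) throws IOException {
    // kms == null keeps the legacy, non key-managed behaviour;
    // null rsServices and reporter match the older entry points.
    return HRegion.openHRegion(rootDir, info, htd, wal, conf, null, null, kms);
  }
}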
@@ -7913,7 +8077,7 @@ public static HRegion openReadOnlyFileSystemHRegion(final Configuration conf, fi if (info.getReplicaId() <= 0) { info = RegionReplicaUtil.getRegionInfoForReplica(info, 1); } - HRegion r = HRegion.newHRegion(tableDir, null, fs, conf, info, htd, null); + HRegion r = HRegion.newHRegion(tableDir, null, fs, conf, info, htd, null, null); r.writestate.setReadOnly(true); return r.openHRegion(null); } @@ -7933,7 +8097,7 @@ public static HRegion warmupHRegion(final RegionInfo info, final TableDescriptor if (fs == null) { fs = rootDir.getFileSystem(conf); } - HRegion r = HRegion.newHRegion(tableDir, wal, fs, conf, info, htd, null); + HRegion r = HRegion.newHRegion(tableDir, wal, fs, conf, info, htd, null, null); r.initializeWarmup(reporter); r.close(); return r; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java index 98299c47302c..995f7fa6c47f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java @@ -81,6 +81,7 @@ import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoderImpl; import org.apache.hadoop.hbase.io.hfile.HFileScanner; import org.apache.hadoop.hbase.io.hfile.InvalidHFileException; +import org.apache.hadoop.hbase.keymeta.KeyNamespaceUtil; import org.apache.hadoop.hbase.monitoring.MonitoredTask; import org.apache.hadoop.hbase.quotas.RegionSizeStore; import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext; @@ -93,7 +94,7 @@ import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory; import org.apache.hadoop.hbase.regionserver.throttle.ThroughputController; import org.apache.hadoop.hbase.regionserver.wal.WALUtil; -import org.apache.hadoop.hbase.security.EncryptionUtil; +import org.apache.hadoop.hbase.security.SecurityUtil; import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.ClassSize; @@ -336,7 +337,9 @@ protected HStore(final HRegion region, final ColumnFamilyDescriptor family, private StoreContext initializeStoreContext(ColumnFamilyDescriptor family) throws IOException { return new StoreContext.Builder().withBlockSize(family.getBlocksize()) - .withEncryptionContext(EncryptionUtil.createEncryptionContext(conf, family)) + .withEncryptionContext(SecurityUtil.createEncryptionContext(conf, family, + region.getManagedKeyDataCache(), region.getSystemKeyCache(), + KeyNamespaceUtil.constructKeyNamespace(region.getTableDescriptor(), family))) .withBloomType(family.getBloomFilterType()).withCacheConfig(createCacheConf(family)) .withCellComparator(region.getTableDescriptor().isMetaTable() || conf .getBoolean(HRegion.USE_META_CELL_COMPARATOR, HRegion.DEFAULT_USE_META_CELL_COMPARATOR) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStoreFile.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStoreFile.java index a7df71f460e4..0fb5c2e5f940 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStoreFile.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStoreFile.java @@ -17,6 +17,8 @@ */ package org.apache.hadoop.hbase.regionserver; +import static org.apache.hadoop.hbase.io.crypto.ManagedKeyData.KEY_SPACE_GLOBAL; + import java.io.IOException; import java.io.UnsupportedEncodingException; import java.net.URLEncoder; @@ -43,7 +45,11 @@ import 
org.apache.hadoop.hbase.io.hfile.HFile; import org.apache.hadoop.hbase.io.hfile.ReaderContext; import org.apache.hadoop.hbase.io.hfile.ReaderContext.ReaderType; +import org.apache.hadoop.hbase.keymeta.KeyNamespaceUtil; +import org.apache.hadoop.hbase.keymeta.ManagedKeyDataCache; +import org.apache.hadoop.hbase.keymeta.SystemKeyCache; import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTracker; +import org.apache.hadoop.hbase.security.SecurityUtil; import org.apache.hadoop.hbase.util.BloomFilterFactory; import org.apache.hadoop.hbase.util.Bytes; import org.apache.yetus.audience.InterfaceAudience; @@ -213,9 +219,16 @@ public long getMaxMemStoreTS() { */ private final BloomType cfBloomType; + private String keyNamespace; + + private SystemKeyCache systemKeyCache; + + private final ManagedKeyDataCache managedKeyDataCache; + /** * Constructor, loads a reader and its indices, etc. May allocate a substantial amount of ram - depending on the underlying files (10-20MB?). + * depending on the underlying files (10-20MB?). Since this is used only in the read path, a key + * namespace is not needed. * @param fs The current file system to use. * @param p The path of the file. * @param conf The current configuration. @@ -229,7 +242,9 @@ */ public HStoreFile(FileSystem fs, Path p, Configuration conf, CacheConfig cacheConf, BloomType cfBloomType, boolean primaryReplica, StoreFileTracker sft) throws IOException { - this(sft.getStoreFileInfo(p, primaryReplica), cfBloomType, cacheConf); + this(sft.getStoreFileInfo(p, primaryReplica), cfBloomType, cacheConf, null, null, + SecurityUtil.isKeyManagementEnabled(conf) ? SystemKeyCache.createCache(conf, fs) : null, + SecurityUtil.isKeyManagementEnabled(conf) ? new ManagedKeyDataCache(conf, null) : null); } /** @@ -243,8 +258,15 @@ public HStoreFile(FileSystem fs, Path p, Configuration conf, CacheConfig cacheCo * ignored. * @param cacheConf The cache configuration and block cache reference. */ - public HStoreFile(StoreFileInfo fileInfo, BloomType cfBloomType, CacheConfig cacheConf) { - this(fileInfo, cfBloomType, cacheConf, null); + public HStoreFile(StoreFileInfo fileInfo, BloomType cfBloomType, CacheConfig cacheConf) + throws IOException { + this(fileInfo, cfBloomType, cacheConf, null, KeyNamespaceUtil.constructKeyNamespace(fileInfo), + SecurityUtil.isKeyManagementEnabled(fileInfo.getConf()) + ? SystemKeyCache.createCache(fileInfo.getConf(), fileInfo.getFileSystem()) + : null, + SecurityUtil.isKeyManagementEnabled(fileInfo.getConf()) + ? new ManagedKeyDataCache(fileInfo.getConf(), null) + : null); } /** @@ -260,10 +282,14 @@ public HStoreFile(StoreFileInfo fileInfo, BloomType cfBloomType, CacheConfig cac * @param metrics Tracks bloom filter requests and results. May be null. */ public HStoreFile(StoreFileInfo fileInfo, BloomType cfBloomType, CacheConfig cacheConf, - BloomFilterMetrics metrics) { + BloomFilterMetrics metrics, String keyNamespace, SystemKeyCache systemKeyCache, + ManagedKeyDataCache managedKeyDataCache) { this.fileInfo = fileInfo; this.cacheConf = cacheConf; this.metrics = metrics; + this.keyNamespace = keyNamespace != null ?
keyNamespace : KEY_SPACE_GLOBAL; + this.systemKeyCache = systemKeyCache; + this.managedKeyDataCache = managedKeyDataCache; if (BloomFilterFactory.isGeneralBloomEnabled(fileInfo.getConf())) { this.cfBloomType = cfBloomType; } else { @@ -392,7 +418,8 @@ public HDFSBlocksDistribution getHDFSBlockDistribution() { private void open() throws IOException { fileInfo.initHDFSBlocksDistribution(); long readahead = fileInfo.isNoReadahead() ? 0L : -1L; - ReaderContext context = fileInfo.createReaderContext(false, readahead, ReaderType.PREAD); + ReaderContext context = fileInfo.createReaderContext(false, readahead, ReaderType.PREAD, + keyNamespace, systemKeyCache, managedKeyDataCache); fileInfo.initHFileInfo(context); StoreFileReader reader = fileInfo.preStoreFileReaderOpen(context, cacheConf); if (reader == null) { @@ -540,7 +567,8 @@ public void initReader() throws IOException { private StoreFileReader createStreamReader(boolean canUseDropBehind) throws IOException { initReader(); final boolean doDropBehind = canUseDropBehind && cacheConf.shouldDropBehindCompaction(); - ReaderContext context = fileInfo.createReaderContext(doDropBehind, -1, ReaderType.STREAM); + ReaderContext context = fileInfo.createReaderContext(doDropBehind, -1, ReaderType.STREAM, + keyNamespace, systemKeyCache, managedKeyDataCache); StoreFileReader reader = fileInfo.preStoreFileReaderOpen(context, cacheConf); if (reader == null) { reader = fileInfo.createReader(context, cacheConf); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java index a46e2dae695c..db5cec9f3228 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java @@ -32,6 +32,7 @@ import org.apache.hadoop.hbase.executor.ExecutorService; import org.apache.hadoop.hbase.io.hfile.BlockCache; import org.apache.hadoop.hbase.ipc.RpcServerInterface; +import org.apache.hadoop.hbase.keymeta.KeyManagementService; import org.apache.hadoop.hbase.mob.MobFileCache; import org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager; import org.apache.hadoop.hbase.quotas.RegionServerSpaceQuotaManager; @@ -54,7 +55,8 @@ * judicious adding API. Changes cause ripples through the code base. */ @InterfaceAudience.Private -public interface RegionServerServices extends Server, MutableOnlineRegions, FavoredNodesForRegion { +public interface RegionServerServices + extends Server, MutableOnlineRegions, FavoredNodesForRegion, KeyManagementService { /** Returns the WAL for a particular region. 
Pass null for getting the default (common) WAL */ WAL getWAL(RegionInfo regionInfo) throws IOException; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServicesForStores.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServicesForStores.java index 1c837d216f38..998332637373 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServicesForStores.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServicesForStores.java @@ -25,6 +25,8 @@ import org.apache.hadoop.hbase.executor.ExecutorService.ExecutorConfig; import org.apache.hadoop.hbase.executor.ExecutorType; import org.apache.hadoop.hbase.io.ByteBuffAllocator; +import org.apache.hadoop.hbase.keymeta.ManagedKeyDataCache; +import org.apache.hadoop.hbase.keymeta.SystemKeyCache; import org.apache.hadoop.hbase.wal.WAL; import org.apache.yetus.audience.InterfaceAudience; @@ -117,4 +119,12 @@ public int getNumStores() { long getMemStoreSize() { return region.getMemStoreDataSize(); } + + public ManagedKeyDataCache getManagedKeyDataCache() { + return rsServices.getManagedKeyDataCache(); + } + + public SystemKeyCache getSystemKeyCache() { + return rsServices.getSystemKeyCache(); + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreEngine.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreEngine.java index 30cf5e2a92fa..08e710826358 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreEngine.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreEngine.java @@ -41,6 +41,9 @@ import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.conf.ConfigKey; import org.apache.hadoop.hbase.io.hfile.BloomFilterMetrics; +import org.apache.hadoop.hbase.keymeta.KeyNamespaceUtil; +import org.apache.hadoop.hbase.keymeta.ManagedKeyDataCache; +import org.apache.hadoop.hbase.keymeta.SystemKeyCache; import org.apache.hadoop.hbase.log.HBaseMarkers; import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext; import org.apache.hadoop.hbase.regionserver.compactions.CompactionPolicy; @@ -116,6 +119,10 @@ public abstract class StoreEngine storeFiles) throws IOException; + + /** + * Get the store context. + * @return the store context.
+ */ + StoreContext getStoreContext(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileTrackerBase.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileTrackerBase.java index 779a114af594..87eca7b93c9c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileTrackerBase.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileTrackerBase.java @@ -375,6 +375,11 @@ public String createFromHFileLink(final String hfileLinkName, final boolean crea createBackRef); } + @Override + public StoreContext getStoreContext() { + return ctx; + } + public void removeStoreFiles(List storeFiles) throws IOException { archiveStoreFiles(storeFiles); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java index 9bbaf8cd72d2..1ca3f68ee997 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java @@ -41,9 +41,7 @@ import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.client.AsyncClusterConnection; import org.apache.hadoop.hbase.client.Connection; -import org.apache.hadoop.hbase.keymeta.KeymetaAdmin; -import org.apache.hadoop.hbase.keymeta.ManagedKeyDataCache; -import org.apache.hadoop.hbase.keymeta.SystemKeyCache; +import org.apache.hadoop.hbase.keymeta.KeyManagementService; import org.apache.hadoop.hbase.master.replication.OfflineTableReplicationQueueStorage; import org.apache.hadoop.hbase.replication.ReplicationException; import org.apache.hadoop.hbase.replication.ReplicationGroupOffset; @@ -369,21 +367,6 @@ public ChoreService getChoreService() { return null; } - @Override - public SystemKeyCache getSystemKeyCache() { - return null; - } - - @Override - public ManagedKeyDataCache getManagedKeyDataCache() { - return null; - } - - @Override - public KeymetaAdmin getKeymetaAdmin() { - return null; - } - @Override public FileSystem getFileSystem() { return null; @@ -403,5 +386,10 @@ public Connection createConnection(Configuration conf) throws IOException { public AsyncClusterConnection getAsyncClusterConnection() { return null; } + + @Override + public KeyManagementService getKeyManagementService() { + return null; + } } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/SecurityUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/SecurityUtil.java index 92b5f340a610..2e6e4cb4f933 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/SecurityUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/SecurityUtil.java @@ -17,8 +17,19 @@ */ package org.apache.hadoop.hbase.security; +import java.io.IOException; +import java.security.Key; +import java.security.KeyException; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; +import org.apache.hadoop.hbase.io.crypto.Cipher; +import org.apache.hadoop.hbase.io.crypto.Encryption; +import org.apache.hadoop.hbase.io.crypto.ManagedKeyData; +import org.apache.hadoop.hbase.io.hfile.FixedFileTrailer; +import 
org.apache.hadoop.hbase.keymeta.ManagedKeyDataCache; +import org.apache.hadoop.hbase.keymeta.SystemKeyCache; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; @@ -28,7 +39,6 @@ @InterfaceAudience.Private @InterfaceStability.Evolving public class SecurityUtil { - /** * Get the user name from a principal */ @@ -48,6 +58,183 @@ public static String getPrincipalWithoutRealm(final String principal) { return (i > -1) ? principal.substring(0, i) : principal; } + /** + * Helper to create an encryption context with the current encryption key, suitable for writes. + * @param conf The current configuration. + * @param family The current column descriptor. + * @param managedKeyDataCache The managed key data cache. + * @param systemKeyCache The system key cache. + * @param keyNamespace The key namespace. + * @return The created encryption context. + * @throws IOException if an encryption key for the column cannot be unwrapped + * @throws IllegalStateException in case of encryption related configuration errors + */ + public static Encryption.Context createEncryptionContext(Configuration conf, + ColumnFamilyDescriptor family, ManagedKeyDataCache managedKeyDataCache, + SystemKeyCache systemKeyCache, String keyNamespace) throws IOException { + Encryption.Context cryptoContext = Encryption.Context.NONE; + String cipherName = family.getEncryptionType(); + if (cipherName != null) { + if (!Encryption.isEncryptionEnabled(conf)) { + throw new IllegalStateException("Encryption for family '" + family.getNameAsString() + + "' configured with type '" + cipherName + "' but the encryption feature is disabled"); + } + Cipher cipher = null; + Key key = null; + ManagedKeyData kekKeyData = null; + if (isKeyManagementEnabled(conf)) { + kekKeyData = managedKeyDataCache.getActiveEntry(ManagedKeyData.KEY_GLOBAL_CUSTODIAN_BYTES, + keyNamespace); + // If no active key found in the specific namespace, try the global namespace + if (kekKeyData == null) { + kekKeyData = managedKeyDataCache.getActiveEntry(ManagedKeyData.KEY_GLOBAL_CUSTODIAN_BYTES, + ManagedKeyData.KEY_SPACE_GLOBAL); + if (kekKeyData == null) { + throw new IOException( + "No active key found for custodian: " + ManagedKeyData.KEY_GLOBAL_CUSTODIAN + + " in namespaces: " + keyNamespace + " and " + ManagedKeyData.KEY_SPACE_GLOBAL); + } + // Switch to the global namespace only after the fallback lookup succeeds, so the + // error above still reports the originally requested namespace. + keyNamespace = ManagedKeyData.KEY_SPACE_GLOBAL; + } + if ( + conf.getBoolean(HConstants.CRYPTO_MANAGED_KEYS_LOCAL_KEY_GEN_PER_FILE_ENABLED_CONF_KEY, + HConstants.CRYPTO_MANAGED_KEYS_LOCAL_KEY_GEN_PER_FILE_DEFAULT_ENABLED) + ) { + cipher = + getCipherIfValid(conf, cipherName, kekKeyData.getTheKey(), family.getNameAsString()); + } else { + key = kekKeyData.getTheKey(); + kekKeyData = systemKeyCache.getLatestSystemKey(); + } + } else { + byte[] keyBytes = family.getEncryptionKey(); + if (keyBytes != null) { + // Family provides specific key material + key = EncryptionUtil.unwrapKey(conf, keyBytes); + } else { + cipher = getCipherIfValid(conf, cipherName, null, null); + } + } + if (key != null || cipher != null) { + if (key == null) { + // Family does not provide key material, create a random key + key = cipher.getRandomKey(); + } + if (cipher == null) { + cipher = getCipherIfValid(conf, cipherName, key, family.getNameAsString()); + } + cryptoContext = Encryption.newContext(conf); + cryptoContext.setCipher(cipher); + cryptoContext.setKey(key); + cryptoContext.setKeyNamespace(keyNamespace); + cryptoContext.setKEKData(kekKeyData); + } + } + return cryptoContext; + } + + /** + * Create an
encryption context from the encryption key found in a file trailer, suitable for reads. + * @param conf The current configuration. + * @param path The path of the file. + * @param trailer The file trailer. + * @param managedKeyDataCache The managed key data cache. + * @param systemKeyCache The system key cache. + * @return The created encryption context, or Encryption.Context.NONE if no key material is + * available. + * @throws IOException if an encryption key for the file cannot be unwrapped + */ + public static Encryption.Context createEncryptionContext(Configuration conf, Path path, + FixedFileTrailer trailer, ManagedKeyDataCache managedKeyDataCache, + SystemKeyCache systemKeyCache) throws IOException { + ManagedKeyData kekKeyData = null; + byte[] keyBytes = trailer.getEncryptionKey(); + Encryption.Context cryptoContext = Encryption.Context.NONE; + // Check for any key material available + if (keyBytes != null) { + cryptoContext = Encryption.newContext(conf); + Key kek = null; + // When the KEK metadata is available, we will try to unwrap the encrypted key using the KEK, + // otherwise we fall back to the system key identified by the KEK checksum in the trailer. + if (trailer.getKEKMetadata() != null) { + if (managedKeyDataCache == null) { + throw new IOException("Key management is enabled, but ManagedKeyDataCache is null"); + } + Throwable cause = null; + try { + kekKeyData = managedKeyDataCache.getEntry(ManagedKeyData.KEY_GLOBAL_CUSTODIAN_BYTES, + trailer.getKeyNamespace(), trailer.getKEKMetadata(), keyBytes); + } catch (KeyException | IOException e) { + cause = e; + } + // When getEntry returns null, we treat it the same as the exception case. + if (kekKeyData == null) { + throw new IOException( + "Failed to get key data for KEK metadata: " + trailer.getKEKMetadata(), cause); + } + kek = kekKeyData.getTheKey(); + } else { + if (SecurityUtil.isKeyManagementEnabled(conf)) { + if (systemKeyCache == null) { + throw new IOException("Key management is enabled, but SystemKeyCache is null"); + } + ManagedKeyData systemKeyData = + systemKeyCache.getSystemKeyByChecksum(trailer.getKEKChecksum()); + if (systemKeyData == null) { + throw new IOException( + "Failed to get system key by checksum: " + trailer.getKEKChecksum()); + } + kek = systemKeyData.getTheKey(); + kekKeyData = systemKeyData; + } + } + Key key; + if (kek != null) { + try { + key = EncryptionUtil.unwrapKey(conf, null, keyBytes, kek); + } catch (KeyException | IOException e) { + throw new IOException("Failed to unwrap key with KEK checksum: " + + trailer.getKEKChecksum() + ", metadata: " + trailer.getKEKMetadata(), e); + } + } else { + key = EncryptionUtil.unwrapKey(conf, keyBytes); + } + // Use the algorithm the key wants + Cipher cipher = getCipherIfValid(conf, key.getAlgorithm(), key, null); + cryptoContext.setCipher(cipher); + cryptoContext.setKey(key); + cryptoContext.setKEKData(kekKeyData); + } + return cryptoContext; + } + + /** + * Get the cipher if the cipher name is valid, otherwise throw an exception.
+ * @param conf the configuration + * @param cipherName the cipher name to check + * @param key the key to check + * @param familyName the family name + * @return the cipher if the cipher name is valid + * @throws IllegalStateException if the cipher name is not valid + */ + private static Cipher getCipherIfValid(Configuration conf, String cipherName, Key key, + String familyName) { + // Fail if misconfigured + // We use the encryption type specified in the column schema as a sanity check on + // what the wrapped key is telling us + if (key != null && !key.getAlgorithm().equalsIgnoreCase(cipherName)) { + throw new IllegalStateException( + "Encryption for family '" + familyName + "' configured with type '" + cipherName + + "' but key specifies algorithm '" + key.getAlgorithm() + "'"); + } + // Use the algorithm the key wants + Cipher cipher = Encryption.getCipher(conf, cipherName); + if (cipher == null) { + throw new IllegalStateException("Cipher '" + cipherName + "' is not available"); + } + return cipher; + } + /** * From the given configuration, determine if key management is enabled. * @param conf the configuration to check diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java index 564c46ad5bf6..db7a9422b75e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java @@ -34,6 +34,7 @@ import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.RegionInfoBuilder; import org.apache.hadoop.hbase.client.TableDescriptor; +import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; @@ -90,20 +91,27 @@ public static RegionInfo[] createRegionInfos(TableDescriptor tableDescriptor, * @param newRegions {@link RegionInfo} that describes the regions to create * @param task {@link RegionFillTask} custom code to populate region after creation */ - public static List createRegions(final Configuration conf, final Path rootDir, + public static List createRegions(final MasterProcedureEnv env, final Path rootDir, final TableDescriptor tableDescriptor, final RegionInfo[] newRegions, final RegionFillTask task) throws IOException { if (newRegions == null) return null; int regionNumber = newRegions.length; - ThreadPoolExecutor exec = getRegionOpenAndInitThreadPool(conf, + ThreadPoolExecutor exec = getRegionOpenAndInitThreadPool(env.getMasterConfiguration(), "RegionOpenAndInit-" + tableDescriptor.getTableName(), regionNumber); try { - return createRegions(exec, conf, rootDir, tableDescriptor, newRegions, task); + return createRegions(exec, env.getMasterConfiguration(), env, rootDir, tableDescriptor, + newRegions, task); } finally { exec.shutdownNow(); } } + public static List createRegions(final ThreadPoolExecutor exec, + final Configuration conf, final Path rootDir, final TableDescriptor tableDescriptor, + final RegionInfo[] newRegions, final RegionFillTask task) throws IOException { + return createRegions(exec, conf, null, rootDir, tableDescriptor, newRegions, task); + } + /** * Create new set of regions on the specified file-system. NOTE: you should add the regions * to hbase:meta after this operation.
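Before the remaining ModifyRegionUtils hunks, a minimal sketch of the env-based entry point introduced above may help: the MasterProcedureEnv supplies both the configuration and the MasterServices that the region-creation path forwards as its KeyManagementService. This is illustrative only, not part of the patch; the raw return type in the flattened hunks is assumed to be List<RegionInfo>, and a null task simply skips the post-create fill step.

import java.io.IOException;
import java.util.List;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
import org.apache.hadoop.hbase.util.ModifyRegionUtils;

final class CreateRegionsSketch {
  static List<RegionInfo> create(MasterProcedureEnv env, Path rootDir, TableDescriptor htd,
    RegionInfo[] newRegions) throws IOException {
    // env carries the configuration (env.getMasterConfiguration()) and the
    // MasterServices that doubles as the KeyManagementService for new regions.
    return ModifyRegionUtils.createRegions(env, rootDir, htd, newRegions, null);
  }
}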
@@ -115,8 +123,9 @@ public static List createRegions(final Configuration conf, final Pat * @param task {@link RegionFillTask} custom code to populate region after creation */ public static List createRegions(final ThreadPoolExecutor exec, - final Configuration conf, final Path rootDir, final TableDescriptor tableDescriptor, - final RegionInfo[] newRegions, final RegionFillTask task) throws IOException { + final Configuration conf, final MasterProcedureEnv env, final Path rootDir, + final TableDescriptor tableDescriptor, final RegionInfo[] newRegions, final RegionFillTask task) + throws IOException { if (newRegions == null) return null; int regionNumber = newRegions.length; CompletionService completionService = new ExecutorCompletionService<>(exec); @@ -125,7 +134,7 @@ public static List createRegions(final ThreadPoolExecutor exec, completionService.submit(new Callable() { @Override public RegionInfo call() throws IOException { - return createRegion(conf, rootDir, tableDescriptor, newRegion, task); + return createRegion(conf, env, rootDir, tableDescriptor, newRegion, task); } }); } @@ -151,15 +160,16 @@ public RegionInfo call() throws IOException { * @param newRegion {@link RegionInfo} that describes the region to create * @param task {@link RegionFillTask} custom code to populate region after creation */ - public static RegionInfo createRegion(final Configuration conf, final Path rootDir, - final TableDescriptor tableDescriptor, final RegionInfo newRegion, final RegionFillTask task) - throws IOException { + public static RegionInfo createRegion(final Configuration conf, final MasterProcedureEnv env, + final Path rootDir, final TableDescriptor tableDescriptor, final RegionInfo newRegion, + final RegionFillTask task) throws IOException { // 1. Create HRegion // The WAL subsystem will use the default rootDir rather than the passed in rootDir // unless I pass along via the conf. Configuration confForWAL = new Configuration(conf); confForWAL.set(HConstants.HBASE_DIR, rootDir.toString()); - HRegion region = HRegion.createHRegion(newRegion, rootDir, conf, tableDescriptor, null, false); + HRegion region = HRegion.createHRegion(newRegion, rootDir, conf, tableDescriptor, null, false, + null, env == null ? null : env.getMasterServices()); try { // 2. 
Custom user code to interact with the created region if (task != null) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/ManagedKeyTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/ManagedKeyTestBase.java index caccf816c8a3..f3b2e2ca1ade 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/ManagedKeyTestBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/ManagedKeyTestBase.java @@ -19,6 +19,7 @@ import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.io.crypto.ManagedKeyProvider; import org.apache.hadoop.hbase.io.crypto.MockManagedKeyProvider; import org.junit.After; import org.junit.Before; @@ -29,7 +30,7 @@ public class ManagedKeyTestBase { @Before public void setUp() throws Exception { TEST_UTIL.getConfiguration().set(HConstants.CRYPTO_KEYPROVIDER_CONF_KEY, - MockManagedKeyProvider.class.getName()); + getKeyProviderClass().getName()); TEST_UTIL.getConfiguration().set(HConstants.CRYPTO_MANAGED_KEYS_ENABLED_CONF_KEY, "true"); TEST_UTIL.getConfiguration().set("hbase.coprocessor.master.classes", KeymetaServiceEndpoint.class.getName()); @@ -44,4 +45,8 @@ public void setUp() throws Exception { public void tearDown() throws Exception { TEST_UTIL.shutdownMiniCluster(); } + + protected Class getKeyProviderClass() { + return MockManagedKeyProvider.class; + } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestKeyManagementBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestKeyManagementBase.java index d476e0619ca4..8ae91de6588f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestKeyManagementBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestKeyManagementBase.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.hbase.keymeta; +import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertThrows; import static org.junit.Assert.assertTrue; import static org.mockito.Mockito.mock; @@ -25,7 +26,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.Server; +import org.apache.hadoop.hbase.master.MasterServices; import org.apache.hadoop.hbase.testclassification.MasterTests; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.junit.ClassRule; @@ -45,22 +46,36 @@ public void testGetKeyProviderWithInvalidProvider() throws Exception { conf.set(HConstants.CRYPTO_KEYPROVIDER_CONF_KEY, "org.apache.hadoop.hbase.keymeta.DummyKeyProvider"); - Server mockServer = mock(Server.class); + MasterServices mockServer = mock(MasterServices.class); when(mockServer.getConfiguration()).thenReturn(conf); - KeyManagementBase keyMgmt = new TestKeyManagement(mockServer); + final KeyManagementBase keyMgmt = new TestKeyManagement(mockServer); + assertEquals(mockServer, keyMgmt.getKeyManagementService()); // Should throw RuntimeException when provider is not ManagedKeyProvider RuntimeException exception = assertThrows(RuntimeException.class, () -> { keyMgmt.getKeyProvider(); }); - assertTrue(exception.getMessage().contains("expected to be of type ManagedKeyProvider")); + exception = assertThrows(RuntimeException.class, () -> { + KeyManagementBase keyMgmt2 = new TestKeyManagement(conf); + keyMgmt2.getKeyProvider(); + }); + assertTrue(exception.getMessage().contains("expected to be of type ManagedKeyProvider")); + + 
assertThrows(IllegalArgumentException.class, () -> { + Configuration configuration = null; + new TestKeyManagement(configuration); + }); } private static class TestKeyManagement extends KeyManagementBase { - public TestKeyManagement(Server server) { + public TestKeyManagement(MasterServices server) { super(server); } + + public TestKeyManagement(Configuration configuration) { + super(configuration); + } } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestKeyManagementService.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestKeyManagementService.java new file mode 100644 index 000000000000..3fe669f90d80 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestKeyManagementService.java @@ -0,0 +1,102 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.keymeta; + +import static org.apache.hadoop.hbase.HConstants.SYSTEM_KEY_FILE_PREFIX; +import static org.apache.hadoop.hbase.io.crypto.KeymetaTestUtils.SeekableByteArrayInputStream; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertThrows; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import java.io.ByteArrayOutputStream; +import java.io.DataOutputStream; +import java.io.IOException; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FSDataInputStream; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.io.crypto.Encryption; +import org.apache.hadoop.hbase.io.crypto.KeymetaTestUtils; +import org.apache.hadoop.hbase.io.crypto.ManagedKeyData; +import org.apache.hadoop.hbase.io.crypto.MockManagedKeyProvider; +import org.apache.hadoop.hbase.testclassification.MiscTests; +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.apache.hadoop.hbase.util.CommonFSUtils; +import org.junit.Before; +import org.junit.ClassRule; +import org.junit.Rule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.rules.TestName; + +@Category({ MiscTests.class, SmallTests.class }) +public class TestKeyManagementService { + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestKeyManagementService.class); + + @Rule + public TestName name = new TestName(); + + protected Configuration conf = new Configuration(); + protected FileSystem mockFileSystem = mock(FileSystem.class); + + @Before + public void setUp() throws Exception { + 
conf.set(HConstants.CRYPTO_MANAGED_KEYS_ENABLED_CONF_KEY, "true"); + conf.set(HConstants.CRYPTO_KEYPROVIDER_CONF_KEY, MockManagedKeyProvider.class.getName()); + conf.set(HConstants.HBASE_ORIGINAL_DIR, "/tmp/hbase"); + } + + @Test + public void testDefaultKeyManagementServiceCreation() throws IOException { + // SystemKeyCache needs at least one valid key to be created, so setting up a mock FS that + // returns a mock file that returns a known mocked key metadata. + MockManagedKeyProvider provider = (MockManagedKeyProvider) Encryption.getKeyProvider(conf); + ManagedKeyData keyData = + provider.getManagedKey("system".getBytes(), ManagedKeyData.KEY_SPACE_GLOBAL); + String fileName = SYSTEM_KEY_FILE_PREFIX + "1"; + Path systemKeyDir = CommonFSUtils.getSystemKeyDir(conf); + FileStatus mockFileStatus = KeymetaTestUtils.createMockFile(fileName); + + // Create a real FSDataInputStream that contains the key metadata in UTF format + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + DataOutputStream dos = new DataOutputStream(baos); + dos.writeUTF(keyData.getKeyMetadata()); + dos.close(); + + SeekableByteArrayInputStream seekableStream = + new SeekableByteArrayInputStream(baos.toByteArray()); + FSDataInputStream realStream = new FSDataInputStream(seekableStream); + + when(mockFileSystem.open(eq(mockFileStatus.getPath()))).thenReturn(realStream); + when(mockFileSystem.globStatus(eq(new Path(systemKeyDir, SYSTEM_KEY_FILE_PREFIX + "*")))) + .thenReturn(new FileStatus[] { mockFileStatus }); + + KeyManagementService service = KeyManagementService.createDefault(conf, mockFileSystem); + assertNotNull(service); + assertNotNull(service.getSystemKeyCache()); + assertNotNull(service.getManagedKeyDataCache()); + assertThrows(UnsupportedOperationException.class, () -> service.getKeymetaAdmin()); + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestKeyNamespaceUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestKeyNamespaceUtil.java new file mode 100644 index 000000000000..1012d2b5a08f --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestKeyNamespaceUtil.java @@ -0,0 +1,126 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.keymeta; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertThrows; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; +import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; +import org.apache.hadoop.hbase.client.RegionInfo; +import org.apache.hadoop.hbase.client.RegionInfoBuilder; +import org.apache.hadoop.hbase.client.TableDescriptor; +import org.apache.hadoop.hbase.io.HFileLink; +import org.apache.hadoop.hbase.io.crypto.KeymetaTestUtils; +import org.apache.hadoop.hbase.regionserver.HRegionFileSystem; +import org.apache.hadoop.hbase.regionserver.StoreContext; +import org.apache.hadoop.hbase.regionserver.StoreFileInfo; +import org.apache.hadoop.hbase.testclassification.MiscTests; +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +@Category({ MiscTests.class, SmallTests.class }) +public class TestKeyNamespaceUtil { + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestKeyNamespaceUtil.class); + + @Test + public void testConstructKeyNamespace_FromTableDescriptorAndFamilyDescriptor() { + TableDescriptor tableDescriptor = mock(TableDescriptor.class); + ColumnFamilyDescriptor familyDescriptor = mock(ColumnFamilyDescriptor.class); + when(tableDescriptor.getTableName()).thenReturn(TableName.valueOf("test")); + when(familyDescriptor.getNameAsString()).thenReturn("family"); + String keyNamespace = KeyNamespaceUtil.constructKeyNamespace(tableDescriptor, familyDescriptor); + assertEquals("test/family", keyNamespace); + } + + @Test + public void testConstructKeyNamespace_FromStoreContext() { + // Test store context path construction + TableName tableName = TableName.valueOf("test"); + RegionInfo regionInfo = RegionInfoBuilder.newBuilder(tableName).build(); + HRegionFileSystem regionFileSystem = mock(HRegionFileSystem.class); + when(regionFileSystem.getRegionInfo()).thenReturn(regionInfo); + + ColumnFamilyDescriptor familyDescriptor = ColumnFamilyDescriptorBuilder.of("family"); + + StoreContext storeContext = StoreContext.getBuilder().withRegionFileSystem(regionFileSystem) + .withColumnFamilyDescriptor(familyDescriptor).build(); + + String keyNamespace = KeyNamespaceUtil.constructKeyNamespace(storeContext); + assertEquals("test/family", keyNamespace); + } + + @Test + public void testConstructKeyNamespace_FromStoreFileInfo_RegularFile() { + // Test both regular files and linked files + StoreFileInfo storeFileInfo = mock(StoreFileInfo.class); + when(storeFileInfo.isLink()).thenReturn(false); + Path path = KeymetaTestUtils.createMockPath("test", "family"); + when(storeFileInfo.getPath()).thenReturn(path); + String keyNamespace = KeyNamespaceUtil.constructKeyNamespace(storeFileInfo); + assertEquals("test/family", keyNamespace); + } + + @Test + public void testConstructKeyNamespace_FromStoreFileInfo_LinkedFile() { + // Test both regular files and linked files + StoreFileInfo storeFileInfo = mock(StoreFileInfo.class); + HFileLink link = mock(HFileLink.class); + when(storeFileInfo.isLink()).thenReturn(true); + Path path = KeymetaTestUtils.createMockPath("test", "family"); + when(link.getOriginPath()).thenReturn(path); + 
when(storeFileInfo.getLink()).thenReturn(link); + String keyNamespace = KeyNamespaceUtil.constructKeyNamespace(storeFileInfo); + assertEquals("test/family", keyNamespace); + } + + @Test + public void testConstructKeyNamespace_FromPath() { + // Test path parsing with different HBase directory structures + Path path = KeymetaTestUtils.createMockPath("test", "family"); + String keyNamespace = KeyNamespaceUtil.constructKeyNamespace(path); + assertEquals("test/family", keyNamespace); + } + + @Test + public void testConstructKeyNamespace_FromStrings() { + // Test string-based construction + String tableName = "test"; + String family = "family"; + String keyNamespace = KeyNamespaceUtil.constructKeyNamespace(tableName, family); + assertEquals("test/family", keyNamespace); + } + + @Test + public void testConstructKeyNamespace_NullChecks() { + // Test null inputs for both table name and family + assertThrows(NullPointerException.class, + () -> KeyNamespaceUtil.constructKeyNamespace(null, "family")); + assertThrows(NullPointerException.class, + () -> KeyNamespaceUtil.constructKeyNamespace("test", null)); + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestKeymetaEndpoint.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestKeymetaEndpoint.java index bc8e14fe4b3d..7c884bdd27e4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestKeymetaEndpoint.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestKeymetaEndpoint.java @@ -148,9 +148,7 @@ public void testConvertToKeyCustBytesInvalid() { public void testGetResponseBuilder() { // Arrange String keyCust = Base64.getEncoder().encodeToString("testKey".getBytes()); - String keyNamespace = "testNamespace"; - ManagedKeysRequest request = - requestBuilder.setKeyCust(keyCust).setKeyNamespace(keyNamespace).build(); + ManagedKeysRequest request = requestBuilder.setKeyCust(keyCust).build(); // Act ManagedKeysResponse.Builder result = @@ -158,7 +156,6 @@ public void testGetResponseBuilder() { // Assert assertNotNull(result); - assertEquals(keyNamespace, result.getKeyNamespace()); assertArrayEquals("testKey".getBytes(), result.getKeyCustBytes().toByteArray()); verify(controller, never()).setFailed(anyString()); } @@ -167,9 +164,7 @@ public void testGetResponseBuilder() { public void testGetResponseBuilderWithInvalidBase64() { // Arrange String keyCust = "invalidBase64!"; - String keyNamespace = "testNamespace"; - ManagedKeysRequest request = - requestBuilder.setKeyCust(keyCust).setKeyNamespace(keyNamespace).build(); + ManagedKeysRequest request = requestBuilder.setKeyCust(keyCust).build(); // Act ManagedKeysResponse.Builder result = @@ -177,7 +172,6 @@ public void testGetResponseBuilderWithInvalidBase64() { // Assert assertNotNull(result); - assertEquals(keyNamespace, result.getKeyNamespace()); assertEquals(KEY_FAILED, result.getKeyState()); verify(controller).setFailed(contains("Failed to decode specified prefix as Base64 string")); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestKeymetaTableAccessor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestKeymetaTableAccessor.java index 12b74e1c3bcc..b695dedcdf98 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestKeymetaTableAccessor.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestKeymetaTableAccessor.java @@ -57,7 +57,6 @@ import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseConfiguration; import 
org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.Durability; import org.apache.hadoop.hbase.client.Get; @@ -69,6 +68,7 @@ import org.apache.hadoop.hbase.io.crypto.ManagedKeyData; import org.apache.hadoop.hbase.io.crypto.ManagedKeyState; import org.apache.hadoop.hbase.io.crypto.MockManagedKeyProvider; +import org.apache.hadoop.hbase.master.MasterServices; import org.apache.hadoop.hbase.security.EncryptionUtil; import org.apache.hadoop.hbase.testclassification.MasterTests; import org.apache.hadoop.hbase.testclassification.SmallTests; @@ -99,7 +99,7 @@ public class TestKeymetaTableAccessor { protected static String KEY_METADATA = "metadata1"; @Mock - protected Server server; + protected MasterServices server; @Mock protected Connection connection; @Mock @@ -127,6 +127,7 @@ public void setUp() throws Exception { when(connection.getTable(KeymetaTableAccessor.KEY_META_TABLE_NAME)).thenReturn(table); when(server.getSystemKeyCache()).thenReturn(systemKeyCache); when(server.getConfiguration()).thenReturn(conf); + when(server.getKeyManagementService()).thenReturn(server); accessor = new KeymetaTableAccessor(server); managedKeyProvider = new MockManagedKeyProvider(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestManagedKeyDataCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestManagedKeyDataCache.java index 61678e316ceb..807586a9a476 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestManagedKeyDataCache.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestManagedKeyDataCache.java @@ -425,7 +425,7 @@ public void testActiveKeysCacheRetrievalFromL2Cache() throws Exception { @Test public void testGenericCacheWithKeymetaAccessorException() throws Exception { when(mockL2.getKey(CUST_ID, KEY_SPACE_GLOBAL, "test-metadata")) - .thenThrow(new IOException("Test exception")); + .thenThrow(new IOException("Test exception")); assertNull(cache.getEntry(CUST_ID, KEY_SPACE_GLOBAL, "test-metadata", null)); verify(mockL2).getKey(any(), any(String.class), any(String.class)); clearInvocations(mockL2); @@ -436,7 +436,7 @@ public void testGenericCacheWithKeymetaAccessorException() throws Exception { @Test public void testGetActiveEntryWithKeymetaAccessorException() throws Exception { when(mockL2.getActiveKey(CUST_ID, KEY_SPACE_GLOBAL)) - .thenThrow(new IOException("Test exception")); + .thenThrow(new IOException("Test exception")); assertNull(cache.getActiveEntry(CUST_ID, KEY_SPACE_GLOBAL)); verify(mockL2).getActiveKey(any(), any(String.class)); clearInvocations(mockL2); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestSystemKeyCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestSystemKeyCache.java index 9882b823da8c..f541d4bac18c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestSystemKeyCache.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestSystemKeyCache.java @@ -47,7 +47,8 @@ import org.mockito.MockitoAnnotations; /** - * Tests for SystemKeyCache class + * Tests for SystemKeyCache class. NOTE: The createCache() method is tested in + * TestKeyManagementService. 
*/ @Category({ MasterTests.class, SmallTests.class }) public class TestSystemKeyCache { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MasterStateStoreTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MasterStateStoreTestBase.java index f5c259927475..9cf69775a30e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MasterStateStoreTestBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MasterStateStoreTestBase.java @@ -26,7 +26,6 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.ChoreService; import org.apache.hadoop.hbase.HBaseZKTestingUtil; -import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; @@ -72,7 +71,7 @@ public static void setUpBeforeClass() throws Exception { CHORE_SERVICE = new ChoreService("TestMasterStateStore"); HFILE_CLEANER_POOL = DirScanPool.getHFileCleanerScanPool(conf); LOG_CLEANER_POOL = DirScanPool.getLogCleanerScanPool(conf); - Server server = mock(Server.class); + MasterServices server = mock(MasterServices.class); when(server.getConfiguration()).thenReturn(conf); when(server.getServerName()) .thenReturn(ServerName.valueOf("localhost", 12345, EnvironmentEdgeManager.currentTime())); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java index 6af4dcec1ad2..5b522dc91072 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java @@ -38,6 +38,7 @@ import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.executor.ExecutorService; import org.apache.hadoop.hbase.favored.FavoredNodesManager; +import org.apache.hadoop.hbase.keymeta.KeyManagementService; import org.apache.hadoop.hbase.keymeta.KeymetaAdmin; import org.apache.hadoop.hbase.keymeta.ManagedKeyDataCache; import org.apache.hadoop.hbase.keymeta.SystemKeyCache; @@ -587,8 +588,12 @@ public long flushTable(TableName tableName, List columnFamilies, long no return 0; } - @Override public long rollAllWALWriters(long nonceGroup, long nonce) throws IOException { return 0; } + + @Override + public KeyManagementService getKeyManagementService() { + return this; + } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java index ba3387b8ad1d..81977c24b290 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java @@ -52,6 +52,7 @@ import org.apache.hadoop.hbase.io.hfile.BlockCache; import org.apache.hadoop.hbase.ipc.HBaseRpcController; import org.apache.hadoop.hbase.ipc.RpcServerInterface; +import org.apache.hadoop.hbase.keymeta.KeyManagementService; import org.apache.hadoop.hbase.keymeta.KeymetaAdmin; import org.apache.hadoop.hbase.keymeta.ManagedKeyDataCache; import org.apache.hadoop.hbase.keymeta.SystemKeyCache; @@ -775,4 +776,9 @@ public ReplicateWALEntryResponse replicateToReplica(RpcController controller, ReplicateWALEntryRequest request) throws ServiceException { return null; } + + @Override + public KeyManagementService getKeyManagementService() { + return 
null; + } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestActiveMasterManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestActiveMasterManager.java index 02d76a9af3af..ac6d754a8396 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestActiveMasterManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestActiveMasterManager.java @@ -33,9 +33,7 @@ import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.ServerName; -import org.apache.hadoop.hbase.keymeta.KeymetaAdmin; -import org.apache.hadoop.hbase.keymeta.ManagedKeyDataCache; -import org.apache.hadoop.hbase.keymeta.SystemKeyCache; +import org.apache.hadoop.hbase.keymeta.KeyManagementService; import org.apache.hadoop.hbase.monitoring.MonitoredTask; import org.apache.hadoop.hbase.monitoring.TaskGroup; import org.apache.hadoop.hbase.testclassification.MasterTests; @@ -332,17 +330,7 @@ public ActiveMasterManager getActiveMasterManager() { } @Override - public SystemKeyCache getSystemKeyCache() { - return null; - } - - @Override - public ManagedKeyDataCache getManagedKeyDataCache() { - return null; - } - - @Override - public KeymetaAdmin getKeymetaAdmin() { + public KeyManagementService getKeyManagementService() { return null; } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestKeymetaAdminImpl.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestKeymetaAdminImpl.java index 08ffef8e0e9f..6592238add50 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestKeymetaAdminImpl.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestKeymetaAdminImpl.java @@ -43,7 +43,6 @@ import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.io.crypto.Encryption; import org.apache.hadoop.hbase.io.crypto.ManagedKeyData; import org.apache.hadoop.hbase.io.crypto.ManagedKeyProvider; @@ -73,11 +72,12 @@ TestKeymetaAdminImpl.TestForKeyProviderNullReturn.class, }) @Category({ MasterTests.class, SmallTests.class }) public class TestKeymetaAdminImpl { - private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); private static final String CUST = "cust1"; private static final String ENCODED_CUST = ManagedKeyProvider.encodeToStr(CUST.getBytes()); + private final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); + @Rule public TestName name = new TestName(); @@ -86,7 +86,7 @@ public class TestKeymetaAdminImpl { protected FileSystem fs; protected FileSystem mockFileSystem = mock(FileSystem.class); - protected Server mockServer = mock(Server.class); + protected MasterServices mockServer = mock(MasterServices.class); protected KeymetaAdminImplForTest keymetaAdmin; KeymetaTableAccessor keymetaAccessor = mock(KeymetaTableAccessor.class); @@ -99,6 +99,7 @@ public void setUp() throws Exception { conf.set(HConstants.CRYPTO_MANAGED_KEYS_ENABLED_CONF_KEY, "true"); conf.set(HConstants.CRYPTO_KEYPROVIDER_CONF_KEY, MockManagedKeyProvider.class.getName()); + when(mockServer.getKeyManagementService()).thenReturn(mockServer); when(mockServer.getFileSystem()).thenReturn(mockFileSystem); when(mockServer.getConfiguration()).thenReturn(conf); keymetaAdmin = new KeymetaAdminImplForTest(mockServer, keymetaAccessor); @@ -221,7 +222,7 @@ public void test() throws Exception { } 
private class KeymetaAdminImplForTest extends KeymetaAdminImpl { - public KeymetaAdminImplForTest(Server mockServer, KeymetaTableAccessor mockAccessor) { + public KeymetaAdminImplForTest(MasterServices mockServer, KeymetaTableAccessor mockAccessor) { super(mockServer); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSystemKeyAccessorAndManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSystemKeyAccessorAndManager.java index 1c4ad60a8da1..0dc765ba7291 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSystemKeyAccessorAndManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSystemKeyAccessorAndManager.java @@ -45,6 +45,7 @@ import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.io.crypto.KeymetaTestUtils; import org.apache.hadoop.hbase.io.crypto.ManagedKeyData; import org.apache.hadoop.hbase.io.crypto.ManagedKeyProvider; import org.apache.hadoop.hbase.io.crypto.ManagedKeyState; @@ -105,14 +106,6 @@ public void setUp() throws Exception { systemKeyManager = new SystemKeyManager(mockMaster); } - private static FileStatus createMockFile(String fileName) { - Path mockPath = mock(Path.class); - when(mockPath.getName()).thenReturn(fileName); - FileStatus mockFileStatus = mock(FileStatus.class); - when(mockFileStatus.getPath()).thenReturn(mockPath); - return mockFileStatus; - } - @RunWith(BlockJUnit4ClassRunner.class) @Category({ MasterTests.class, SmallTests.class }) public static class TestAccessorWhenDisabled extends TestSystemKeyAccessorAndManager { @@ -128,8 +121,8 @@ public void setUp() throws Exception { @Test public void test() throws Exception { - assertNull(systemKeyManager.getAllSystemKeyFiles()); - assertNull(systemKeyManager.getLatestSystemKeyFile().getFirst()); + assertThrows(IOException.class, () -> systemKeyManager.getAllSystemKeyFiles()); + assertThrows(IOException.class, () -> systemKeyManager.getLatestSystemKeyFile().getFirst()); } } @@ -164,15 +157,15 @@ public static class TestAccessor extends TestSystemKeyAccessorAndManager { public void testGetLatestWithNone() throws Exception { when(mockFileSystem.globStatus(any())).thenReturn(new FileStatus[0]); - RuntimeException ex = assertThrows(RuntimeException.class, - () -> systemKeyManager.getLatestSystemKeyFile()); + RuntimeException ex = + assertThrows(RuntimeException.class, () -> systemKeyManager.getLatestSystemKeyFile()); assertEquals("No cluster key initialized yet", ex.getMessage()); } @Test public void testGetWithSingle() throws Exception { String fileName = SYSTEM_KEY_FILE_PREFIX + "1"; - FileStatus mockFileStatus = createMockFile(fileName); + FileStatus mockFileStatus = KeymetaTestUtils.createMockFile(fileName); Path systemKeyDir = CommonFSUtils.getSystemKeyDir(conf); when(mockFileSystem.globStatus(eq(new Path(systemKeyDir, SYSTEM_KEY_FILE_PREFIX + "*")))) @@ -192,7 +185,8 @@ public void testGetWithSingle() throws Exception { @Test public void testGetWithMultiple() throws Exception { FileStatus[] mockFileStatuses = IntStream.rangeClosed(1, 3) - .mapToObj(i -> createMockFile(SYSTEM_KEY_FILE_PREFIX + i)).toArray(FileStatus[]::new); + .mapToObj(i -> KeymetaTestUtils.createMockFile(SYSTEM_KEY_FILE_PREFIX + i)) + .toArray(FileStatus[]::new); Path systemKeyDir = CommonFSUtils.getSystemKeyDir(conf); when(mockFileSystem.globStatus(eq(new Path(systemKeyDir, SYSTEM_KEY_FILE_PREFIX + "*")))) @@ -208,7 +202,8 @@ 
public void testGetWithMultiple() throws Exception { @Test public void testExtractKeySequenceForInvalidFilename() throws Exception { - assertEquals(-1, SystemKeyAccessor.extractKeySequence(createMockFile("abcd").getPath())); + assertEquals(-1, + SystemKeyAccessor.extractKeySequence(KeymetaTestUtils.createMockFile("abcd").getPath())); } } @@ -235,7 +230,7 @@ public static Collection data() { @Test public void test() throws Exception { - FileStatus mockFileStatus = createMockFile(fileName); + FileStatus mockFileStatus = KeymetaTestUtils.createMockFile(fileName); IOException ex = assertThrows(IOException.class, () -> SystemKeyAccessor.extractSystemKeySeqNum(mockFileStatus.getPath())); @@ -345,7 +340,7 @@ public void testEnsureSystemKeyInitialized_RaceCondition() throws Exception { when(mockFileSystem.create(any())).thenReturn(mockStream); when(mockFileSystem.rename(any(), any())).thenReturn(false); String fileName = SYSTEM_KEY_FILE_PREFIX + "1"; - FileStatus mockFileStatus = createMockFile(fileName); + FileStatus mockFileStatus = KeymetaTestUtils.createMockFile(fileName); when(mockFileSystem.globStatus(any())).thenReturn(new FileStatus[0], new FileStatus[] { mockFileStatus }); @@ -498,6 +493,16 @@ public void testExtractKeySequenceEdgeCases() throws Exception { assertEquals(0, SystemKeyAccessor.extractKeySequence(validZero)); assertEquals(-1, SystemKeyAccessor.extractKeySequence(validNegative)); } + + @Test + public void testCreateCacheFactoryMethod() { + // Test static factory method + } + + @Test + public void testCreateCacheWithNoKeys() { + // Test behavior when no system keys are available + } } private static class MockSystemKeyManager extends SystemKeyManager { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.java index a5e3dd1a5b83..ab99c55e6255 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.java @@ -38,9 +38,7 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.TableDescriptor; -import org.apache.hadoop.hbase.keymeta.KeymetaAdmin; -import org.apache.hadoop.hbase.keymeta.ManagedKeyDataCache; -import org.apache.hadoop.hbase.keymeta.SystemKeyCache; +import org.apache.hadoop.hbase.keymeta.KeyManagementService; import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.replication.ReplicationException; import org.apache.hadoop.hbase.replication.ReplicationFactory; @@ -218,21 +216,6 @@ public Connection getConnection() { } } - @Override - public SystemKeyCache getSystemKeyCache() { - return null; - } - - @Override - public ManagedKeyDataCache getManagedKeyDataCache() { - return null; - } - - @Override - public KeymetaAdmin getKeymetaAdmin() { - return null; - } - @Override public FileSystem getFileSystem() { try { @@ -241,5 +224,10 @@ public FileSystem getFileSystem() { throw new UncheckedIOException(e); } } + + @Override + public KeyManagementService getKeyManagementService() { + return null; + } } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/region/MasterRegionTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/region/MasterRegionTestBase.java index 0526fd3ba70c..9ea11f732310 100644 --- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/region/MasterRegionTestBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/region/MasterRegionTestBase.java @@ -26,12 +26,12 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.ChoreService; import org.apache.hadoop.hbase.HBaseCommonTestingUtil; -import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.client.TableDescriptorBuilder; +import org.apache.hadoop.hbase.master.MasterServices; import org.apache.hadoop.hbase.master.cleaner.DirScanPool; import org.apache.hadoop.hbase.regionserver.MemStoreLAB; import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory; @@ -53,7 +53,7 @@ public class MasterRegionTestBase { protected DirScanPool logCleanerPool; - protected Server server; + protected MasterServices server; protected static byte[] CF1 = Bytes.toBytes("f1"); @@ -96,7 +96,7 @@ protected final void createMasterRegion() throws IOException { choreService = new ChoreService(getClass().getSimpleName()); hfileCleanerPool = DirScanPool.getHFileCleanerScanPool(conf); logCleanerPool = DirScanPool.getLogCleanerScanPool(conf); - server = mock(Server.class); + server = mock(MasterServices.class); when(server.getConfiguration()).thenReturn(conf); when(server.getServerName()) .thenReturn(ServerName.valueOf("localhost", 12345, EnvironmentEdgeManager.currentTime())); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/region/TestMasterRegionOnTwoFileSystems.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/region/TestMasterRegionOnTwoFileSystems.java index 8f11cc415058..80792d4b276d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/region/TestMasterRegionOnTwoFileSystems.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/region/TestMasterRegionOnTwoFileSystems.java @@ -40,7 +40,6 @@ import org.apache.hadoop.hbase.HBaseCommonTestingUtil; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; @@ -49,6 +48,7 @@ import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.client.TableDescriptorBuilder; +import org.apache.hadoop.hbase.master.MasterServices; import org.apache.hadoop.hbase.regionserver.MemStoreLAB; import org.apache.hadoop.hbase.regionserver.RegionScanner; import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory; @@ -119,7 +119,7 @@ public static void tearDown() throws IOException { } private MasterRegion createMasterRegion(ServerName serverName) throws IOException { - Server server = mock(Server.class); + MasterServices server = mock(MasterServices.class); when(server.getConfiguration()).thenReturn(HFILE_UTIL.getConfiguration()); when(server.getServerName()).thenReturn(serverName); MasterRegionParams params = new MasterRegionParams(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/RegionProcedureStorePerformanceEvaluation.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/RegionProcedureStorePerformanceEvaluation.java index 790435f6a47e..779ca4dac6c5 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/RegionProcedureStorePerformanceEvaluation.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/RegionProcedureStorePerformanceEvaluation.java @@ -24,9 +24,7 @@ import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.io.util.MemorySizeUtil; -import org.apache.hadoop.hbase.keymeta.KeymetaAdmin; -import org.apache.hadoop.hbase.keymeta.ManagedKeyDataCache; -import org.apache.hadoop.hbase.keymeta.SystemKeyCache; +import org.apache.hadoop.hbase.master.MockNoopMasterServices; import org.apache.hadoop.hbase.master.region.MasterRegion; import org.apache.hadoop.hbase.master.region.MasterRegionFactory; import org.apache.hadoop.hbase.procedure2.store.ProcedureStorePerformanceEvaluation; @@ -34,47 +32,24 @@ import org.apache.hadoop.hbase.regionserver.MemStoreLAB; import org.apache.hadoop.hbase.util.CommonFSUtils; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; -import org.apache.hadoop.hbase.util.MockServer; import org.apache.hadoop.hbase.util.Pair; public class RegionProcedureStorePerformanceEvaluation extends ProcedureStorePerformanceEvaluation<RegionProcedureStore> { - private static final class DummyServer extends MockServer { - - private final Configuration conf; + private static final class DummyServer extends MockNoopMasterServices { private final ServerName serverName = ServerName.valueOf("localhost", 12345, EnvironmentEdgeManager.currentTime()); public DummyServer(Configuration conf) { - this.conf = conf; - } - - @Override - public Configuration getConfiguration() { - return conf; + super(conf); } @Override public ServerName getServerName() { return serverName; } - - @Override - public SystemKeyCache getSystemKeyCache() { - return null; - } - - @Override - public ManagedKeyDataCache getManagedKeyDataCache() { - return null; - } - - @Override - public KeymetaAdmin getKeymetaAdmin() { - return null; - } } private MasterRegion region; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/RegionProcedureStoreTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/RegionProcedureStoreTestBase.java index c05eb9a8ce3e..dac4cc1e0e73 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/RegionProcedureStoreTestBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/RegionProcedureStoreTestBase.java @@ -21,7 +21,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseCommonTestingUtil; -import org.apache.hadoop.hbase.Server; +import org.apache.hadoop.hbase.master.MasterServices; import org.apache.hadoop.hbase.master.region.MasterRegion; import org.apache.hadoop.hbase.master.region.MasterRegionFactory; import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility.LoadCounter; @@ -51,7 +51,7 @@ public void setUp() throws IOException { conf.setBoolean(CommonFSUtils.UNSAFE_STREAM_CAPABILITY_ENFORCE, false); Path testDir = htu.getDataTestDir(); CommonFSUtils.setRootDir(htu.getConfiguration(), testDir); - Server server = RegionProcedureStoreTestHelper.mockServer(conf); + MasterServices server = RegionProcedureStoreTestHelper.mockServer(conf); region =
MasterRegionFactory.create(server); store = RegionProcedureStoreTestHelper.createStore(server, region, new LoadCounter()); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/RegionProcedureStoreTestHelper.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/RegionProcedureStoreTestHelper.java index 0607d9d3e924..cc90d6e22b61 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/RegionProcedureStoreTestHelper.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/RegionProcedureStoreTestHelper.java @@ -26,6 +26,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.master.MasterServices; import org.apache.hadoop.hbase.master.region.MasterRegion; import org.apache.hadoop.hbase.procedure2.store.LeaseRecovery; import org.apache.hadoop.hbase.procedure2.store.ProcedureStore.ProcedureLoader; @@ -36,8 +37,8 @@ final class RegionProcedureStoreTestHelper { private RegionProcedureStoreTestHelper() { } - static Server mockServer(Configuration conf) { - Server server = mock(Server.class); + static MasterServices mockServer(Configuration conf) { + MasterServices server = mock(MasterServices.class); when(server.getConfiguration()).thenReturn(conf); when(server.getServerName()) .thenReturn(ServerName.valueOf("localhost", 12345, EnvironmentEdgeManager.currentTime())); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/TestRegionProcedureStoreMigration.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/TestRegionProcedureStoreMigration.java index 7a6fee5f314c..70b93487c12b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/TestRegionProcedureStoreMigration.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/TestRegionProcedureStoreMigration.java @@ -35,9 +35,9 @@ import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseCommonTestingUtil; import org.apache.hadoop.hbase.HBaseIOException; -import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.RegionInfoBuilder; +import org.apache.hadoop.hbase.master.MasterServices; import org.apache.hadoop.hbase.master.assignment.AssignProcedure; import org.apache.hadoop.hbase.master.region.MasterRegion; import org.apache.hadoop.hbase.master.region.MasterRegionFactory; @@ -66,7 +66,7 @@ public class TestRegionProcedureStoreMigration { private HBaseCommonTestingUtil htu; - private Server server; + private MasterServices server; private MasterRegion region; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHeapMemoryManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHeapMemoryManager.java index 22be21811950..9b6a5d80c9ab 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHeapMemoryManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHeapMemoryManager.java @@ -46,9 +46,7 @@ import org.apache.hadoop.hbase.io.hfile.CachedBlock; import org.apache.hadoop.hbase.io.hfile.ResizableBlockCache; import org.apache.hadoop.hbase.io.util.MemorySizeUtil; -import org.apache.hadoop.hbase.keymeta.KeymetaAdmin; -import org.apache.hadoop.hbase.keymeta.ManagedKeyDataCache; -import 
org.apache.hadoop.hbase.keymeta.SystemKeyCache; +import org.apache.hadoop.hbase.keymeta.KeyManagementService; import org.apache.hadoop.hbase.regionserver.HeapMemoryManager.TunerContext; import org.apache.hadoop.hbase.regionserver.HeapMemoryManager.TunerResult; import org.apache.hadoop.hbase.testclassification.MediumTests; @@ -840,21 +838,6 @@ public ChoreService getChoreService() { return null; } - @Override - public SystemKeyCache getSystemKeyCache() { - return null; - } - - @Override - public ManagedKeyDataCache getManagedKeyDataCache() { - return null; - } - - @Override - public KeymetaAdmin getKeymetaAdmin() { - return null; - } - @Override public FileSystem getFileSystem() { return null; @@ -874,6 +857,11 @@ public Connection createConnection(Configuration conf) throws IOException { public AsyncClusterConnection getAsyncClusterConnection() { return null; } + + @Override + public KeyManagementService getKeyManagementService() { + return null; + } } static class CustomHeapMemoryTuner implements HeapMemoryTuner { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRecoveredEditsReplayAndAbort.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRecoveredEditsReplayAndAbort.java index 6b372fa99350..1a4ba7ac99cd 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRecoveredEditsReplayAndAbort.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRecoveredEditsReplayAndAbort.java @@ -125,7 +125,8 @@ public void test() throws Exception { Path rootDir = TEST_UTIL.getDataTestDir(); Path tableDir = CommonFSUtils.getTableDir(rootDir, info.getTable()); HRegionFileSystem.createRegionOnFileSystem(CONF, TEST_UTIL.getTestFileSystem(), tableDir, info); - region = HRegion.newHRegion(tableDir, wal, TEST_UTIL.getTestFileSystem(), CONF, info, htd, rs); + region = HRegion.newHRegion(tableDir, wal, TEST_UTIL.getTestFileSystem(), CONF, info, htd, rs, + rs.getKeyManagementService()); // create some recovered.edits final WALFactory wals = new WALFactory(CONF, method); try { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileInfo.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileInfo.java index ffc4e17f6f8b..29040ad58bec 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileInfo.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileInfo.java @@ -122,7 +122,8 @@ public void testOpenErrorMessageReference() throws IOException { storeFileTrackerForTest.createReference(r, p); StoreFileInfo sfi = storeFileTrackerForTest.getStoreFileInfo(p, true); try { - ReaderContext context = sfi.createReaderContext(false, 1000, ReaderType.PREAD); + ReaderContext context = + sfi.createReaderContext(false, 1000, ReaderType.PREAD, null, null, null); sfi.createReader(context, null); throw new IllegalStateException(); } catch (FileNotFoundException fnfe) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/TestSecurityUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/TestSecurityUtil.java new file mode 100644 index 000000000000..ca2f8088a786 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/TestSecurityUtil.java @@ -0,0 +1,751 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.security; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertThrows; +import static org.junit.Assert.assertTrue; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import java.io.IOException; +import java.security.Key; +import java.security.KeyException; +import java.util.Arrays; +import java.util.Collection; +import javax.crypto.spec.SecretKeySpec; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.HBaseTestingUtil; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; +import org.apache.hadoop.hbase.io.crypto.Cipher; +import org.apache.hadoop.hbase.io.crypto.CipherProvider; +import org.apache.hadoop.hbase.io.crypto.Encryption; +import org.apache.hadoop.hbase.io.crypto.KeyProvider; +import org.apache.hadoop.hbase.io.crypto.ManagedKeyData; +import org.apache.hadoop.hbase.io.crypto.MockAesKeyProvider; +import org.apache.hadoop.hbase.io.hfile.FixedFileTrailer; +import org.apache.hadoop.hbase.keymeta.ManagedKeyDataCache; +import org.apache.hadoop.hbase.keymeta.SystemKeyCache; +import org.apache.hadoop.hbase.testclassification.SecurityTests; +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.junit.Before; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; +import org.junit.runners.BlockJUnit4ClassRunner; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameter; +import org.junit.runners.Suite; + +@RunWith(Suite.class) +@Suite.SuiteClasses({ TestSecurityUtil.TestBasic.class, + TestSecurityUtil.TestCreateEncryptionContext_ForWrites.class, + TestSecurityUtil.TestCreateEncryptionContext_ForReads.class, + TestSecurityUtil.TestCreateEncryptionContext_WithoutKeyManagement_UnwrapKeyException.class, }) +@Category({ SecurityTests.class, SmallTests.class }) +public class TestSecurityUtil { + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestSecurityUtil.class); + + // Test constants to eliminate magic strings and improve maintainability + protected static final String TEST_NAMESPACE = "test-namespace"; + protected static final String TEST_FAMILY = "test-family"; + protected static final String HBASE_KEY = "hbase"; + protected static final String TEST_KEK_METADATA = "test-kek-metadata"; + protected static final long 
TEST_KEK_CHECKSUM = 12345L; + protected static final String TEST_KEY_16_BYTE = "test-key-16-byte"; + protected static final String TEST_DEK_16_BYTE = "test-dek-16-byte"; + protected static final String INVALID_KEY_DATA = "invalid-key-data"; + protected static final String INVALID_WRAPPED_KEY_DATA = "invalid-wrapped-key-data"; + protected static final String INVALID_SYSTEM_KEY_DATA = "invalid-system-key-data"; + protected static final String UNKNOWN_CIPHER = "UNKNOWN_CIPHER"; + protected static final String AES_CIPHER = "AES"; + protected static final String DES_CIPHER = "DES"; + + protected Configuration conf; + protected HBaseTestingUtil testUtil; + protected Path testPath; + protected ColumnFamilyDescriptor mockFamily; + protected ManagedKeyDataCache mockManagedKeyDataCache; + protected SystemKeyCache mockSystemKeyCache; + protected FixedFileTrailer mockTrailer; + protected ManagedKeyData mockManagedKeyData; + protected Key testKey; + protected byte[] testWrappedKey; + protected Key kekKey; + + /** + * Configuration builder for setting up different encryption test scenarios. + */ + protected static class TestConfigBuilder { + private boolean encryptionEnabled = true; + private boolean keyManagementEnabled = false; + private boolean localKeyGenEnabled = false; + private String cipherProvider = "org.apache.hadoop.hbase.io.crypto.DefaultCipherProvider"; + private String keyProvider = MockAesKeyProvider.class.getName(); + private String masterKeyName = HBASE_KEY; + + public TestConfigBuilder withEncryptionEnabled(boolean enabled) { + this.encryptionEnabled = enabled; + return this; + } + + public TestConfigBuilder withKeyManagement(boolean enabled, boolean localKeyGen) { + this.keyManagementEnabled = enabled; + this.localKeyGenEnabled = localKeyGen; + return this; + } + + public TestConfigBuilder withNullCipherProvider() { + this.cipherProvider = NullCipherProvider.class.getName(); + return this; + } + + public void apply(Configuration conf) { + conf.setBoolean(Encryption.CRYPTO_ENABLED_CONF_KEY, encryptionEnabled); + conf.set(HConstants.CRYPTO_KEYPROVIDER_CONF_KEY, keyProvider); + conf.set(HConstants.CRYPTO_MASTERKEY_NAME_CONF_KEY, masterKeyName); + conf.set(HConstants.CRYPTO_KEYPROVIDER_PARAMETERS_KEY, "true"); + conf.set(HConstants.CRYPTO_CIPHERPROVIDER_CONF_KEY, cipherProvider); + + if (keyManagementEnabled) { + conf.setBoolean(HConstants.CRYPTO_MANAGED_KEYS_ENABLED_CONF_KEY, true); + conf.setBoolean(HConstants.CRYPTO_MANAGED_KEYS_LOCAL_KEY_GEN_PER_FILE_ENABLED_CONF_KEY, + localKeyGenEnabled); + } else { + conf.setBoolean(HConstants.CRYPTO_MANAGED_KEYS_ENABLED_CONF_KEY, false); + } + } + } + + protected static TestConfigBuilder configBuilder() { + return new TestConfigBuilder(); + } + + protected void setUpEncryptionConfig() { + // Set up real encryption configuration using default AES cipher + conf.setBoolean(Encryption.CRYPTO_ENABLED_CONF_KEY, true); + conf.set(HConstants.CRYPTO_KEYPROVIDER_CONF_KEY, MockAesKeyProvider.class.getName()); + conf.set(HConstants.CRYPTO_MASTERKEY_NAME_CONF_KEY, "hbase"); + // Enable key caching + conf.set(HConstants.CRYPTO_KEYPROVIDER_PARAMETERS_KEY, "true"); + // Use DefaultCipherProvider for real AES encryption functionality + conf.set(HConstants.CRYPTO_CIPHERPROVIDER_CONF_KEY, + "org.apache.hadoop.hbase.io.crypto.DefaultCipherProvider"); + } + + protected void setUpEncryptionConfigWithNullCipher() { + configBuilder().withNullCipherProvider().apply(conf); + } + + // ==== Mock Setup Helpers ==== + + protected void setupManagedKeyDataCache(String namespace, 
ManagedKeyData keyData) { + when(mockManagedKeyDataCache.getActiveEntry(eq(ManagedKeyData.KEY_GLOBAL_CUSTODIAN_BYTES), + eq(namespace))).thenReturn(keyData); + } + + protected void setupManagedKeyDataCache(String namespace, String globalSpace, + ManagedKeyData keyData) { + when(mockManagedKeyDataCache.getActiveEntry(eq(ManagedKeyData.KEY_GLOBAL_CUSTODIAN_BYTES), + eq(namespace))).thenReturn(null); + when(mockManagedKeyDataCache.getActiveEntry(eq(ManagedKeyData.KEY_GLOBAL_CUSTODIAN_BYTES), + eq(globalSpace))).thenReturn(keyData); + } + + protected void setupTrailerMocks(byte[] keyBytes, String metadata, Long checksum, + String namespace) { + when(mockTrailer.getEncryptionKey()).thenReturn(keyBytes); + when(mockTrailer.getKEKMetadata()).thenReturn(metadata); + if (checksum != null) { + when(mockTrailer.getKEKChecksum()).thenReturn(checksum); + } + when(mockTrailer.getKeyNamespace()).thenReturn(namespace); + } + + protected void setupSystemKeyCache(Long checksum, ManagedKeyData keyData) { + when(mockSystemKeyCache.getSystemKeyByChecksum(checksum)).thenReturn(keyData); + } + + protected void setupSystemKeyCache(ManagedKeyData latestKey) { + when(mockSystemKeyCache.getLatestSystemKey()).thenReturn(latestKey); + } + + protected void setupManagedKeyDataCacheEntry(String namespace, String metadata, byte[] keyBytes, + ManagedKeyData keyData) throws IOException, KeyException { + when(mockManagedKeyDataCache.getEntry(eq(ManagedKeyData.KEY_GLOBAL_CUSTODIAN_BYTES), + eq(namespace), eq(metadata), eq(keyBytes))).thenReturn(keyData); + } + + // ==== Exception Testing Helpers ==== + + protected <T extends Exception> void assertExceptionContains(Class<T> expectedType, + String expectedMessage, Runnable testCode) { + T exception = assertThrows(expectedType, () -> testCode.run()); + assertTrue("Exception message should contain: " + expectedMessage, + exception.getMessage().contains(expectedMessage)); + } + + protected void assertEncryptionContextThrowsForWrites(Class<? extends Exception> expectedType, + String expectedMessage) { + Exception exception = assertThrows(Exception.class, () -> { + SecurityUtil.createEncryptionContext(conf, mockFamily, mockManagedKeyDataCache, + mockSystemKeyCache, TEST_NAMESPACE); + }); + assertTrue("Expected exception type: " + expectedType.getName() + ", but got: " + + exception.getClass().getName(), expectedType.isInstance(exception)); + assertTrue("Exception message should contain: " + expectedMessage, + exception.getMessage().contains(expectedMessage)); + } + + protected void assertEncryptionContextThrowsForReads(Class<? extends Exception> expectedType, + String expectedMessage) { + Exception exception = assertThrows(Exception.class, () -> { + SecurityUtil.createEncryptionContext(conf, testPath, mockTrailer, mockManagedKeyDataCache, + mockSystemKeyCache); + }); + assertTrue("Expected exception type: " + expectedType.getName() + ", but got: " + + exception.getClass().getName(), expectedType.isInstance(exception)); + assertTrue("Exception message should contain: " + expectedMessage, + exception.getMessage().contains(expectedMessage)); + } + + @Before + public void setUp() throws Exception { + conf = HBaseConfiguration.create(); + testUtil = new HBaseTestingUtil(conf); + testPath = testUtil.getDataTestDir("test-file"); + + // Setup mocks (only for objects that don't have encryption logic) + mockFamily = mock(ColumnFamilyDescriptor.class); + mockManagedKeyDataCache = mock(ManagedKeyDataCache.class); + mockSystemKeyCache = mock(SystemKeyCache.class); + mockTrailer = mock(FixedFileTrailer.class); + mockManagedKeyData = mock(ManagedKeyData.class); + 
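+ // Note on the fixture assembled below: the mocks above carry no crypto logic of
+ // their own; the configured key provider supplies the KEK (the "hbase" master key),
+ // with which a DEK is wrapped into testWrappedKey at the end of this setUp so that
+ // the read-path tests can exercise real unwrapping.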
+ // Use a real test key with exactly 16 bytes for AES-128 + testKey = new SecretKeySpec(TEST_KEY_16_BYTE.getBytes(), AES_CIPHER); + + // Configure mocks + when(mockFamily.getEncryptionType()).thenReturn(AES_CIPHER); + when(mockFamily.getNameAsString()).thenReturn(TEST_FAMILY); + when(mockManagedKeyData.getTheKey()).thenReturn(testKey); + + // Set up default encryption config + setUpEncryptionConfig(); + + // Create test wrapped key + KeyProvider keyProvider = Encryption.getKeyProvider(conf); + kekKey = keyProvider.getKey(HBASE_KEY); + Key key = keyProvider.getKey(TEST_DEK_16_BYTE); + testWrappedKey = EncryptionUtil.wrapKey(conf, null, key, kekKey); + } + + @RunWith(BlockJUnit4ClassRunner.class) + @Category({ SecurityTests.class, SmallTests.class }) + public static class TestBasic extends TestSecurityUtil { + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestBasic.class); + + @Test + public void testGetUserFromPrincipal() { + // Test with slash separator + assertEquals("user1", SecurityUtil.getUserFromPrincipal("user1/host@REALM")); + assertEquals("user2", SecurityUtil.getUserFromPrincipal("user2@REALM")); + + // Test with no realm + assertEquals("user3", SecurityUtil.getUserFromPrincipal("user3")); + + // Test with multiple slashes + assertEquals("user4", SecurityUtil.getUserFromPrincipal("user4/host1/host2@REALM")); + } + + @Test + public void testGetPrincipalWithoutRealm() { + // Test with realm + assertEquals("user1/host", SecurityUtil.getPrincipalWithoutRealm("user1/host@REALM")); + assertEquals("user2", SecurityUtil.getPrincipalWithoutRealm("user2@REALM")); + + // Test without realm + assertEquals("user3", SecurityUtil.getPrincipalWithoutRealm("user3")); + assertEquals("user4/host", SecurityUtil.getPrincipalWithoutRealm("user4/host")); + } + + @Test + public void testIsKeyManagementEnabled() { + Configuration conf = HBaseConfiguration.create(); + + // Test default behavior (should be false) + assertFalse(SecurityUtil.isKeyManagementEnabled(conf)); + + // Test with key management enabled + conf.setBoolean(HConstants.CRYPTO_MANAGED_KEYS_ENABLED_CONF_KEY, true); + assertTrue(SecurityUtil.isKeyManagementEnabled(conf)); + + // Test with key management disabled + conf.setBoolean(HConstants.CRYPTO_MANAGED_KEYS_ENABLED_CONF_KEY, false); + assertFalse(SecurityUtil.isKeyManagementEnabled(conf)); + } + } + + // Tests for the first createEncryptionContext method (for ColumnFamilyDescriptor) + @RunWith(BlockJUnit4ClassRunner.class) + @Category({ SecurityTests.class, SmallTests.class }) + public static class TestCreateEncryptionContext_ForWrites extends TestSecurityUtil { + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestCreateEncryptionContext_ForWrites.class); + + @Test + public void testWithNoEncryptionOnFamily() throws IOException { + when(mockFamily.getEncryptionType()).thenReturn(null); + + Encryption.Context result = SecurityUtil.createEncryptionContext(conf, mockFamily, + mockManagedKeyDataCache, mockSystemKeyCache, "test-namespace"); + + assertEquals(Encryption.Context.NONE, result); + } + + @Test + public void testWithEncryptionDisabled() throws IOException { + configBuilder().withEncryptionEnabled(false).apply(conf); + assertEncryptionContextThrowsForWrites(IllegalStateException.class, + "encryption feature is disabled"); + } + + @Test + public void testWithKeyManagement_LocalKeyGen() throws IOException { + configBuilder().withKeyManagement(true, true).apply(conf); + 
setupManagedKeyDataCache(TEST_NAMESPACE, mockManagedKeyData); + + Encryption.Context result = SecurityUtil.createEncryptionContext(conf, mockFamily, + mockManagedKeyDataCache, mockSystemKeyCache, TEST_NAMESPACE); + + verifyContext(result); + } + + @Test + public void testWithKeyManagement_NoActiveKey() throws IOException { + configBuilder().withKeyManagement(true, false).apply(conf); + setupManagedKeyDataCache(TEST_NAMESPACE, ManagedKeyData.KEY_SPACE_GLOBAL, null); + assertEncryptionContextThrowsForWrites(IOException.class, "No active key found"); + } + + @Test + public void testWithKeyManagement_LocalKeyGen_WithUnknownKeyCipher() throws IOException { + when(mockFamily.getEncryptionType()).thenReturn(UNKNOWN_CIPHER); + Key unknownKey = mock(Key.class); + when(unknownKey.getAlgorithm()).thenReturn(UNKNOWN_CIPHER); + when(mockManagedKeyData.getTheKey()).thenReturn(unknownKey); + + configBuilder().withKeyManagement(true, true).apply(conf); + setupManagedKeyDataCache(TEST_NAMESPACE, mockManagedKeyData); + assertEncryptionContextThrowsForWrites(RuntimeException.class, + "Cipher 'UNKNOWN_CIPHER' is not"); + } + + @Test + public void testWithKeyManagement_LocalKeyGen_WithKeyAlgorithmMismatch() throws IOException { + Key desKey = mock(Key.class); + when(desKey.getAlgorithm()).thenReturn(DES_CIPHER); + when(mockManagedKeyData.getTheKey()).thenReturn(desKey); + + configBuilder().withKeyManagement(true, true).apply(conf); + setupManagedKeyDataCache(TEST_NAMESPACE, mockManagedKeyData); + assertEncryptionContextThrowsForWrites(IllegalStateException.class, + "Encryption for family 'test-family' configured with type 'AES' but key specifies algorithm 'DES'"); + } + + @Test + public void testWithKeyManagement_UseSystemKeyWithNSSpecificActiveKey() throws IOException { + configBuilder().withKeyManagement(true, false).apply(conf); + setupManagedKeyDataCache(TEST_NAMESPACE, mockManagedKeyData); + setupSystemKeyCache(mockManagedKeyData); + + Encryption.Context result = SecurityUtil.createEncryptionContext(conf, mockFamily, + mockManagedKeyDataCache, mockSystemKeyCache, TEST_NAMESPACE); + + verifyContext(result); + } + + @Test + public void testWithKeyManagement_UseSystemKeyWithoutNSSpecificActiveKey() throws IOException { + configBuilder().withKeyManagement(true, false).apply(conf); + setupManagedKeyDataCache(TEST_NAMESPACE, ManagedKeyData.KEY_SPACE_GLOBAL, mockManagedKeyData); + setupSystemKeyCache(mockManagedKeyData); + when(mockManagedKeyData.getTheKey()).thenReturn(kekKey); + + Encryption.Context result = SecurityUtil.createEncryptionContext(conf, mockFamily, + mockManagedKeyDataCache, mockSystemKeyCache, TEST_NAMESPACE); + + verifyContext(result); + } + + @Test + public void testWithoutKeyManagement_WithFamilyProvidedKey() throws Exception { + when(mockFamily.getEncryptionKey()).thenReturn(testWrappedKey); + configBuilder().withKeyManagement(false, false).apply(conf); + + Encryption.Context result = SecurityUtil.createEncryptionContext(conf, mockFamily, + mockManagedKeyDataCache, mockSystemKeyCache, TEST_NAMESPACE); + + verifyContext(result, false); + } + + @Test + public void testWithoutKeyManagement_KeyAlgorithmMismatch() throws Exception { + // Create a key with different algorithm and wrap it + Key differentKey = new SecretKeySpec(TEST_KEY_16_BYTE.getBytes(), DES_CIPHER); + byte[] wrappedDESKey = EncryptionUtil.wrapKey(conf, HBASE_KEY, differentKey); + when(mockFamily.getEncryptionKey()).thenReturn(wrappedDESKey); + + configBuilder().withKeyManagement(false, false).apply(conf); + 
assertEncryptionContextThrowsForWrites(IllegalStateException.class, + "Encryption for family 'test-family' configured with type 'AES' but key specifies algorithm 'DES'"); + } + + @Test + public void testWithoutKeyManagement_WithRandomKeyGeneration() throws IOException { + when(mockFamily.getEncryptionKey()).thenReturn(null); + configBuilder().withKeyManagement(false, false).apply(conf); + + Encryption.Context result = SecurityUtil.createEncryptionContext(conf, mockFamily, + mockManagedKeyDataCache, mockSystemKeyCache, TEST_NAMESPACE); + + verifyContext(result, false); + } + + @Test + public void testWithUnavailableCipher() throws IOException { + when(mockFamily.getEncryptionType()).thenReturn(UNKNOWN_CIPHER); + setUpEncryptionConfigWithNullCipher(); + assertEncryptionContextThrowsForWrites(IllegalStateException.class, + "Cipher 'UNKNOWN_CIPHER' is not available"); + } + + // Tests for the second createEncryptionContext method (for reading files) + + @Test + public void testWithNoKeyMaterial() throws IOException { + when(mockTrailer.getEncryptionKey()).thenReturn(null); + when(mockTrailer.getKeyNamespace()).thenReturn(TEST_NAMESPACE); + + Encryption.Context result = SecurityUtil.createEncryptionContext(conf, testPath, mockTrailer, + mockManagedKeyDataCache, mockSystemKeyCache); + + assertEquals(Encryption.Context.NONE, result); + } + } + + // Tests for the second createEncryptionContext method (for reading files) + @RunWith(BlockJUnit4ClassRunner.class) + @Category({ SecurityTests.class, SmallTests.class }) + public static class TestCreateEncryptionContext_ForReads extends TestSecurityUtil { + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestCreateEncryptionContext_ForReads.class); + + @Test + public void testWithKEKMetadata() throws Exception { + setupTrailerMocks(testWrappedKey, TEST_KEK_METADATA, TEST_KEK_CHECKSUM, TEST_NAMESPACE); + setupManagedKeyDataCacheEntry(TEST_NAMESPACE, TEST_KEK_METADATA, testWrappedKey, + mockManagedKeyData); + when(mockManagedKeyData.getTheKey()).thenReturn(kekKey); + + Encryption.Context result = SecurityUtil.createEncryptionContext(conf, testPath, mockTrailer, + mockManagedKeyDataCache, mockSystemKeyCache); + + verifyContext(result); + } + + @Test + public void testWithKeyManagement_KEKMetadataFailure() throws IOException, KeyException { + byte[] keyBytes = "test-encrypted-key".getBytes(); + String kekMetadata = "test-kek-metadata"; + + when(mockTrailer.getEncryptionKey()).thenReturn(keyBytes); + when(mockTrailer.getKEKMetadata()).thenReturn(kekMetadata); + when(mockTrailer.getKeyNamespace()).thenReturn("test-namespace"); + + when(mockManagedKeyDataCache.getEntry(eq(ManagedKeyData.KEY_GLOBAL_CUSTODIAN_BYTES), + eq("test-namespace"), eq(kekMetadata), eq(keyBytes))) + .thenThrow(new IOException("Key not found")); + + IOException exception = assertThrows(IOException.class, () -> { + SecurityUtil.createEncryptionContext(conf, testPath, mockTrailer, mockManagedKeyDataCache, + mockSystemKeyCache); + }); + + assertTrue(exception.getMessage().contains("Failed to get key data")); + } + + @Test + public void testWithKeyManagement_UseSystemKey() throws IOException { + setupTrailerMocks(testWrappedKey, null, TEST_KEK_CHECKSUM, TEST_NAMESPACE); + configBuilder().withKeyManagement(true, false).apply(conf); + setupSystemKeyCache(TEST_KEK_CHECKSUM, mockManagedKeyData); + when(mockManagedKeyData.getTheKey()).thenReturn(kekKey); + + Encryption.Context result = SecurityUtil.createEncryptionContext(conf, testPath, mockTrailer, + 
mockManagedKeyDataCache, mockSystemKeyCache); + + verifyContext(result); + } + + @Test + public void testWithKeyManagement_SystemKeyNotFound() throws IOException { + byte[] keyBytes = "test-encrypted-key".getBytes(); + long kekChecksum = 12345L; + + when(mockTrailer.getEncryptionKey()).thenReturn(keyBytes); + when(mockTrailer.getKEKMetadata()).thenReturn(null); + when(mockTrailer.getKEKChecksum()).thenReturn(kekChecksum); + when(mockTrailer.getKeyNamespace()).thenReturn("test-namespace"); + + // Enable key management + conf.setBoolean(HConstants.CRYPTO_MANAGED_KEYS_ENABLED_CONF_KEY, true); + + when(mockSystemKeyCache.getSystemKeyByChecksum(kekChecksum)).thenReturn(null); + + IOException exception = assertThrows(IOException.class, () -> { + SecurityUtil.createEncryptionContext(conf, testPath, mockTrailer, mockManagedKeyDataCache, + mockSystemKeyCache); + }); + + assertTrue(exception.getMessage().contains("Failed to get system key")); + } + + @Test + public void testWithoutKeyManagementEnabled() throws IOException { + when(mockTrailer.getEncryptionKey()).thenReturn(testWrappedKey); + when(mockTrailer.getKEKMetadata()).thenReturn(null); + when(mockTrailer.getKeyNamespace()).thenReturn(TEST_NAMESPACE); + configBuilder().withKeyManagement(false, false).apply(conf); + // TODO: Get the key provider to return kek when getKeys() is called. + + Encryption.Context result = SecurityUtil.createEncryptionContext(conf, testPath, mockTrailer, + mockManagedKeyDataCache, mockSystemKeyCache); + + verifyContext(result, false); + } + + @Test + public void testWithoutKeyManagement_UnwrapFailure() throws IOException { + byte[] invalidKeyBytes = INVALID_KEY_DATA.getBytes(); + when(mockTrailer.getEncryptionKey()).thenReturn(invalidKeyBytes); + when(mockTrailer.getKEKMetadata()).thenReturn(null); + when(mockTrailer.getKeyNamespace()).thenReturn(TEST_NAMESPACE); + configBuilder().withKeyManagement(false, false).apply(conf); + + Exception exception = assertThrows(Exception.class, () -> { + SecurityUtil.createEncryptionContext(conf, testPath, mockTrailer, mockManagedKeyDataCache, + mockSystemKeyCache); + }); + + // The exception should indicate that unwrapping failed - could be IOException or + // RuntimeException + assertNotNull(exception); + } + + @Test + public void testCreateEncryptionContext_WithoutKeyManagement_UnavailableCipher() + throws Exception { + // Create a DES key and wrap it first with working configuration + Key desKey = new SecretKeySpec("test-key-16-byte".getBytes(), "DES"); + byte[] wrappedDESKey = EncryptionUtil.wrapKey(conf, "hbase", desKey); + + when(mockTrailer.getEncryptionKey()).thenReturn(wrappedDESKey); + when(mockTrailer.getKEKMetadata()).thenReturn(null); + when(mockTrailer.getKeyNamespace()).thenReturn("test-namespace"); + + // Disable key management and use null cipher provider + conf.setBoolean(HConstants.CRYPTO_MANAGED_KEYS_ENABLED_CONF_KEY, false); + setUpEncryptionConfigWithNullCipher(); + + RuntimeException exception = assertThrows(RuntimeException.class, () -> { + SecurityUtil.createEncryptionContext(conf, testPath, mockTrailer, mockManagedKeyDataCache, + mockSystemKeyCache); + }); + + assertTrue(exception.getMessage().contains("Cipher 'AES' not available")); + } + + @Test + public void testCreateEncryptionContext_WithKeyManagement_NullKeyManagementCache() + throws IOException { + byte[] keyBytes = "test-encrypted-key".getBytes(); + String kekMetadata = "test-kek-metadata"; + + when(mockTrailer.getEncryptionKey()).thenReturn(keyBytes); + 
when(mockTrailer.getKEKMetadata()).thenReturn(kekMetadata); + when(mockTrailer.getKeyNamespace()).thenReturn("test-namespace"); + + // Enable key management + conf.setBoolean(HConstants.CRYPTO_MANAGED_KEYS_ENABLED_CONF_KEY, true); + + IOException exception = assertThrows(IOException.class, () -> { + SecurityUtil.createEncryptionContext(conf, testPath, mockTrailer, null, mockSystemKeyCache); + }); + + assertTrue(exception.getMessage().contains("ManagedKeyDataCache is null")); + } + + @Test + public void testCreateEncryptionContext_WithKeyManagement_NullSystemKeyCache() + throws IOException { + byte[] keyBytes = "test-encrypted-key".getBytes(); + + when(mockTrailer.getEncryptionKey()).thenReturn(keyBytes); + when(mockTrailer.getKEKMetadata()).thenReturn(null); + when(mockTrailer.getKeyNamespace()).thenReturn("test-namespace"); + + // Enable key management + conf.setBoolean(HConstants.CRYPTO_MANAGED_KEYS_ENABLED_CONF_KEY, true); + + IOException exception = assertThrows(IOException.class, () -> { + SecurityUtil.createEncryptionContext(conf, testPath, mockTrailer, mockManagedKeyDataCache, + null); + }); + + assertTrue(exception.getMessage().contains("SystemKeyCache is null")); + } + } + + @RunWith(Parameterized.class) + @Category({ SecurityTests.class, SmallTests.class }) + public static class TestCreateEncryptionContext_WithoutKeyManagement_UnwrapKeyException + extends TestSecurityUtil { + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule + .forClass(TestCreateEncryptionContext_WithoutKeyManagement_UnwrapKeyException.class); + + @Parameter(0) + public boolean isKeyException; + + @Parameterized.Parameters(name = "{index},isKeyException={0}") + public static Collection<Object[]> data() { + return Arrays.asList(new Object[][] { { true }, { false }, }); + } + + @Test + public void test() throws IOException { + } + + @Test + public void testWithDEK() throws IOException, KeyException { + // This test is challenging because we need to create a scenario where unwrapping fails + // with either KeyException or IOException. We'll create invalid wrapped data.
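+ // The bytes below are not a valid serialized wrapped key, so the unwrap step
+ // fails while parsing them; the failure surfaces as the IOException asserted
+ // below, with the underlying parse error preserved as the cause.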
+ byte[] invalidKeyBytes = INVALID_WRAPPED_KEY_DATA.getBytes(); + + setupTrailerMocks(invalidKeyBytes, TEST_KEK_METADATA, TEST_KEK_CHECKSUM, TEST_NAMESPACE); + setupManagedKeyDataCacheEntry(TEST_NAMESPACE, TEST_KEK_METADATA, invalidKeyBytes, + mockManagedKeyData); + + IOException exception = assertThrows(IOException.class, () -> { + SecurityUtil.createEncryptionContext(conf, testPath, mockTrailer, mockManagedKeyDataCache, + mockSystemKeyCache); + }); + + assertTrue(exception.getMessage().contains("Failed to unwrap key with KEK checksum: " + + TEST_KEK_CHECKSUM + ", metadata: " + TEST_KEK_METADATA)); + // The root cause should be some kind of parsing/unwrapping exception + assertNotNull(exception.getCause()); + } + + @Test + public void testWithSystemKey() throws IOException { + // Use invalid key bytes to trigger unwrapping failure + byte[] invalidKeyBytes = INVALID_SYSTEM_KEY_DATA.getBytes(); + + setupTrailerMocks(invalidKeyBytes, null, TEST_KEK_CHECKSUM, TEST_NAMESPACE); + configBuilder().withKeyManagement(true, false).apply(conf); + setupSystemKeyCache(TEST_KEK_CHECKSUM, mockManagedKeyData); + + IOException exception = assertThrows(IOException.class, () -> { + SecurityUtil.createEncryptionContext(conf, testPath, mockTrailer, mockManagedKeyDataCache, + mockSystemKeyCache); + }); + + assertTrue(exception.getMessage().contains( + "Failed to unwrap key with KEK checksum: " + TEST_KEK_CHECKSUM + ", metadata: null")); + // The root cause should be some kind of parsing/unwrapping exception + assertNotNull(exception.getCause()); + } + } + + protected void verifyContext(Encryption.Context context) { + verifyContext(context, true); + } + + protected void verifyContext(Encryption.Context context, boolean withKeyManagement) { + assertNotNull(context); + assertNotNull("Context should have a cipher", context.getCipher()); + assertNotNull("Context should have a key", context.getKey()); + if (withKeyManagement) { + assertNotNull("Context should have KEK data when key management is enabled", + context.getKEKData()); + } else { + assertNull("Context should not have KEK data when key management is disabled", + context.getKEKData()); + } + } + + /** + * Null cipher provider for testing error cases. 
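+ * Its getCipher(String) always returns null, which lets the tests above drive
+ * the "cipher not available" error paths during encryption context creation.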
+ */ + public static class NullCipherProvider implements CipherProvider { + private Configuration conf; + + @Override + public Configuration getConf() { + return conf; + } + + @Override + public void setConf(Configuration conf) { + this.conf = conf; + } + + @Override + public String getName() { + return "null"; + } + + @Override + public String[] getSupportedCiphers() { + return new String[0]; + } + + @Override + public Cipher getCipher(String name) { + return null; // Always return null to simulate unavailable cipher + } + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java index 5d62d6a908c0..273385ec9c84 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java @@ -55,9 +55,7 @@ import org.apache.hadoop.hbase.ipc.RpcServerInterface; import org.apache.hadoop.hbase.ipc.ServerRpcController; import org.apache.hadoop.hbase.ipc.SimpleRpcServer; -import org.apache.hadoop.hbase.keymeta.KeymetaAdmin; -import org.apache.hadoop.hbase.keymeta.ManagedKeyDataCache; -import org.apache.hadoop.hbase.keymeta.SystemKeyCache; +import org.apache.hadoop.hbase.keymeta.KeyManagementService; import org.apache.hadoop.hbase.log.HBaseMarkers; import org.apache.hadoop.hbase.regionserver.RegionServerServices; import org.apache.hadoop.hbase.security.SecurityInfo; @@ -354,27 +352,17 @@ public ChoreService getChoreService() { } @Override - public SystemKeyCache getSystemKeyCache() { - return null; - } - - @Override - public ManagedKeyDataCache getManagedKeyDataCache() { - return null; - } - - @Override - public KeymetaAdmin getKeymetaAdmin() { + public Connection createConnection(Configuration conf) throws IOException { return null; } @Override - public Connection createConnection(Configuration conf) throws IOException { + public AsyncClusterConnection getAsyncClusterConnection() { return null; } @Override - public AsyncClusterConnection getAsyncClusterConnection() { + public KeyManagementService getKeyManagementService() { return null; } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRestoreSnapshotHelper.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRestoreSnapshotHelper.java index 7b2749177889..a0246fee2955 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRestoreSnapshotHelper.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRestoreSnapshotHelper.java @@ -178,7 +178,7 @@ public void testSkipReplayAndUpdateSeqId() throws Exception { for (RegionInfo restoredRegion : restoredRegions) { // open restored region HRegion region = HRegion.newHRegion(CommonFSUtils.getTableDir(restoreDir, tableName), null, - fs, conf, restoredRegion, htd, null); + fs, conf, restoredRegion, htd, null, null); // set restore flag region.setRestoredRegion(true); region.initialize(); @@ -188,7 +188,7 @@ public void testSkipReplayAndUpdateSeqId() throws Exception { // open restored region without set restored flag HRegion region2 = HRegion.newHRegion(CommonFSUtils.getTableDir(restoreDir, tableName), null, - fs, conf, restoredRegion, htd, null); + fs, conf, restoredRegion, htd, null, null); region2.initialize(); long maxSeqId2 = WALSplitUtil.getMaxRegionSequenceId(fs, recoveredEdit); Assert.assertTrue(maxSeqId2 > maxSeqId); diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MockServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MockServer.java index 98283db19146..77b6ceffe7ca 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MockServer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MockServer.java @@ -26,9 +26,7 @@ import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.client.AsyncClusterConnection; import org.apache.hadoop.hbase.client.Connection; -import org.apache.hadoop.hbase.keymeta.KeymetaAdmin; -import org.apache.hadoop.hbase.keymeta.ManagedKeyDataCache; -import org.apache.hadoop.hbase.keymeta.SystemKeyCache; +import org.apache.hadoop.hbase.keymeta.KeyManagementService; import org.apache.hadoop.hbase.log.HBaseMarkers; import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.slf4j.Logger; @@ -103,21 +101,6 @@ public ChoreService getChoreService() { throw new UnsupportedOperationException(); } - @Override - public SystemKeyCache getSystemKeyCache() { - return null; - } - - @Override - public ManagedKeyDataCache getManagedKeyDataCache() { - return null; - } - - @Override - public KeymetaAdmin getKeymetaAdmin() { - return null; - } - @Override public FileSystem getFileSystem() { throw new UnsupportedOperationException(); @@ -137,4 +120,9 @@ public Connection createConnection(Configuration conf) throws IOException { public AsyncClusterConnection getAsyncClusterConnection() { throw new UnsupportedOperationException(); } + + @Override + public KeyManagementService getKeyManagementService() { + return null; + } } diff --git a/hbase-shell/pom.xml b/hbase-shell/pom.xml index fb699554fc06..d595b2dd219d 100644 --- a/hbase-shell/pom.xml +++ b/hbase-shell/pom.xml @@ -41,6 +41,12 @@ org.apache.hbase hbase-common + + org.apache.hbase + hbase-common + test-jar + test + org.apache.hbase hbase-annotations diff --git a/hbase-shell/src/main/ruby/hbase/hbase.rb b/hbase-shell/src/main/ruby/hbase/hbase.rb index 9b24e5caa973..a7e531806cfe 100644 --- a/hbase-shell/src/main/ruby/hbase/hbase.rb +++ b/hbase-shell/src/main/ruby/hbase/hbase.rb @@ -1,3 +1,5 @@ +# frozen_string_literal: true + # # # Licensed to the Apache Software Foundation (ASF) under one @@ -29,6 +31,7 @@ require 'hbase/visibility_labels' module Hbase + # Main HBase class for connection and admin operations class Hbase attr_accessor :configuration @@ -45,22 +48,21 @@ def initialize(config = nil) end def connection - if @connection.nil? - @connection = ConnectionFactory.createConnection(configuration) - end + @connection = ConnectionFactory.createConnection(configuration) if @connection.nil? 
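+ # Reuse one memoized connection per Hbase instance; it is closed in #shutdown.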
@connection end + # Returns ruby's Admin class from admin.rb def admin - ::Hbase::Admin.new(self.connection) + ::Hbase::Admin.new(connection) end def rsgroup_admin - ::Hbase::RSGroupAdmin.new(self.connection) + ::Hbase::RSGroupAdmin.new(connection) end def keymeta_admin - ::Hbase::KeymetaAdmin.new(@connection) + ::Hbase::KeymetaAdmin.new(connection) end def taskmonitor @@ -69,7 +71,7 @@ def taskmonitor # Create new one each time def table(table, shell) - ::Hbase::Table.new(self.connection.getTable(TableName.valueOf(table)), shell) + ::Hbase::Table.new(connection.getTable(TableName.valueOf(table)), shell) end def replication_admin @@ -77,21 +79,19 @@ def replication_admin end def security_admin - ::Hbase::SecurityAdmin.new(self.connection.getAdmin) + ::Hbase::SecurityAdmin.new(connection.getAdmin) end def visibility_labels_admin - ::Hbase::VisibilityLabelsAdmin.new(self.connection.getAdmin) + ::Hbase::VisibilityLabelsAdmin.new(connection.getAdmin) end def quotas_admin - ::Hbase::QuotasAdmin.new(self.connection.getAdmin) + ::Hbase::QuotasAdmin.new(connection.getAdmin) end def shutdown - if @connection != nil - @connection.close - end + @connection&.close end end end diff --git a/hbase-shell/src/main/ruby/shell/commands/keymeta_command_base.rb b/hbase-shell/src/main/ruby/shell/commands/keymeta_command_base.rb index e2af5f524cc3..98a57766831a 100644 --- a/hbase-shell/src/main/ruby/shell/commands/keymeta_command_base.rb +++ b/hbase-shell/src/main/ruby/shell/commands/keymeta_command_base.rb @@ -34,7 +34,7 @@ def format_status_row(status) [ status.getKeyCustodianEncoded, status.getKeyNamespace, - status.getKeyStatus.toString, + status.getKeyState.toString, status.getKeyMetadata, status.getKeyMetadataHashEncoded, status.getRefreshTimestamp diff --git a/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestKeymetaAdminShell.java b/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestKeymetaAdminShell.java new file mode 100644 index 000000000000..b67fbc69f3c7 --- /dev/null +++ b/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestKeymetaAdminShell.java @@ -0,0 +1,127 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+package org.apache.hadoop.hbase.client;
+
+import java.util.Base64;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Properties;
+import java.util.UUID;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.HBaseTestingUtil;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.io.crypto.KeymetaTestUtils;
+import org.apache.hadoop.hbase.io.crypto.ManagedKeyProvider;
+import org.apache.hadoop.hbase.io.crypto.ManagedKeyStoreKeyProvider;
+import org.apache.hadoop.hbase.keymeta.ManagedKeyTestBase;
+import org.apache.hadoop.hbase.testclassification.ClientTests;
+import org.apache.hadoop.hbase.testclassification.IntegrationTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.jruby.embed.ScriptingContainer;
+import org.junit.Before;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category({ ClientTests.class, IntegrationTests.class })
+public class TestKeymetaAdminShell extends ManagedKeyTestBase implements RubyShellTest {
+  @ClassRule
+  public static final HBaseClassTestRule CLASS_RULE =
+    HBaseClassTestRule.forClass(TestKeymetaAdminShell.class);
+
+  private final ScriptingContainer jruby = new ScriptingContainer();
+
+  @Before
+  public void setUp() throws Exception {
+    final Configuration conf = TEST_UTIL.getConfiguration();
+    // Use uniformly generous timeouts so slow JRuby shell runs don't trip RPC or ZK limits.
+    for (String timeoutKey : new String[] { "zookeeper.session.timeout", "hbase.rpc.timeout",
+      "hbase.rpc.read.timeout", "hbase.rpc.write.timeout", "hbase.client.operation.timeout",
+      "hbase.client.scanner.timeout.period", "hbase.ipc.client.socket.timeout.connect",
+      "hbase.ipc.client.socket.timeout.read", "hbase.ipc.client.socket.timeout.write",
+      "hbase.master.start.timeout.localHBaseCluster",
+      "hbase.master.init.timeout.localHBaseCluster", "hbase.client.sync.wait.timeout.msec" }) {
+      conf.set(timeoutKey, "6000000");
+    }
+    Map cust2key = new HashMap<>();
+    Map cust2alias = new HashMap<>();
+    String clusterId = UUID.randomUUID().toString();
+    String SYSTEM_KEY_ALIAS = "system-key-alias";
+    String CUST1 = "cust1";
+    String CUST1_ALIAS = "cust1-alias";
+    String GLOB_CUST_ALIAS = "glob-cust-alias";
+    String providerParams = KeymetaTestUtils.setupTestKeyStore(TEST_UTIL, true, true, store -> {
+      Properties p = new Properties();
+      try {
+        KeymetaTestUtils.addEntry(conf, 128, store, CUST1_ALIAS, CUST1, true, cust2key, cust2alias,
+          p);
+        KeymetaTestUtils.addEntry(conf, 128, store, GLOB_CUST_ALIAS, "*", true, cust2key,
+          cust2alias, p);
+        KeymetaTestUtils.addEntry(conf, 128, store, SYSTEM_KEY_ALIAS, clusterId, true, cust2key,
+          cust2alias, p);
+      } catch (Exception e) {
+        throw new RuntimeException(e);
+      }
+      return p;
+    });
+    // byte[] systemKey = cust2key.get(new Bytes(clusterId.getBytes())).get();
+    conf.set(HConstants.CRYPTO_MANAGED_KEY_STORE_SYSTEM_KEY_NAME_CONF_KEY, SYSTEM_KEY_ALIAS);
+    conf.set(HConstants.CRYPTO_KEYPROVIDER_PARAMETERS_KEY, providerParams);
+    RubyShellTest.setUpConfig(this);
+    super.setUp();
+    RubyShellTest.setUpJRubyRuntime(this);
+    RubyShellTest.doTestSetup(this);
+    addCustodianRubyEnvVars(jruby, "CUST1", CUST1);
+  }
+
+  @Override
+  public HBaseTestingUtil getTEST_UTIL() {
+    return TEST_UTIL;
+  }
+
+  @Override
+  public ScriptingContainer getJRuby() {
+    return jruby;
+  }
+
+  @Override
+  public String getSuitePattern() {
+    return "**/*_keymeta_test.rb";
+  }
+
+  @Test
+  public void testRunShellTests() throws Exception {
+    RubyShellTest.testRunShellTests(this);
+  }
+
+  @Override
+  protected Class<? extends ManagedKeyProvider> getKeyProviderClass() {
+    return ManagedKeyStoreKeyProvider.class;
+  }
+
+  public static void addCustodianRubyEnvVars(ScriptingContainer jruby, String custId,
+    String custodian) {
+    jruby.put("$" + custId, custodian);
+    jruby.put("$" + custId + "_ALIAS", custodian + "-alias");
+    jruby.put("$" + custId + "_ENCODED", Base64.getEncoder().encodeToString(custodian.getBytes()));
+  }
+}
diff --git a/hbase-shell/src/test/ruby/shell/admin_keymeta_test.rb b/hbase-shell/src/test/ruby/shell/admin_keymeta_test.rb
new file mode 100644
index 000000000000..c1108d0fc7d1
--- /dev/null
+++ b/hbase-shell/src/test/ruby/shell/admin_keymeta_test.rb
@@ -0,0 +1,61 @@
+# frozen_string_literal: true

+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+require 'hbase_shell'
+require 'stringio'
+require 'hbase_constants'
+require 'hbase/hbase'
+require 'hbase/table'
+
+module Hbase
+  # Test class for keymeta admin functionality
+  class KeymetaAdminTest < Test::Unit::TestCase
+    include TestHelpers
+
+    def setup
+      setup_hbase
+    end
+
+    define_test 'Test enable key management' do
+      cust_and_namespace = "#{$CUST1_ENCODED}:*"
+      # Repeat the enable twice in a loop and ensure multiple enables succeed and return the
+      # same output.
+      2.times do |i|
+        output = capture_stdout { @shell.command('enable_key_management', cust_and_namespace) }
+        puts "enable_key_management #{i} output: #{output}"
+        assert(output.include?("#{$CUST1_ENCODED} * ACTIVE"))
+      end
+      output = capture_stdout { @shell.command('show_key_status', cust_and_namespace) }
+      puts "show_key_status output: #{output}"
+      assert(output.include?("#{$CUST1_ENCODED} * ACTIVE"))
+
+      # The ManagedKeyStoreKeyProvider doesn't support specific namespaces, so it will return the
+      # global key.
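+      # Enabling on a table-scoped namespace therefore reports the key as ACTIVE under the
+      # global namespace, while show_key_status for the specific namespace finds no entry of
+      # its own ('0 row(s)'), as the assertions below verify.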
+      cust_and_namespace = "#{$CUST1_ENCODED}:test_table/f"
+      output = capture_stdout { @shell.command('enable_key_management', cust_and_namespace) }
+      puts "enable_key_management output: #{output}"
+      assert(output.include?("#{$CUST1_ENCODED} * ACTIVE"))
+      output = capture_stdout { @shell.command('show_key_status', cust_and_namespace) }
+      puts "show_key_status output: #{output}"
+      assert(output.include?('0 row(s)'))
+    end
+  end
+end
diff --git a/hbase-shell/src/test/ruby/shell/encrypted_table_keymeta_test.rb b/hbase-shell/src/test/ruby/shell/encrypted_table_keymeta_test.rb
new file mode 100644
index 000000000000..be52a2524e4d
--- /dev/null
+++ b/hbase-shell/src/test/ruby/shell/encrypted_table_keymeta_test.rb
@@ -0,0 +1,143 @@
+# frozen_string_literal: true
+
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+require 'hbase_shell'
+require 'stringio'
+require 'hbase_constants'
+require 'hbase/hbase'
+require 'hbase/table'
+
+java_import org.apache.hadoop.conf.Configuration
+java_import org.apache.hadoop.fs.FSDataInputStream
+java_import org.apache.hadoop.hbase.CellUtil
+java_import org.apache.hadoop.hbase.HConstants
+java_import org.apache.hadoop.hbase.TableName
+java_import org.apache.hadoop.hbase.client.Get
+java_import org.apache.hadoop.hbase.io.crypto.Encryption
+java_import org.apache.hadoop.hbase.io.crypto.MockManagedKeyProvider
+java_import org.apache.hadoop.hbase.io.hfile.CorruptHFileException
+java_import org.apache.hadoop.hbase.io.hfile.FixedFileTrailer
+java_import org.apache.hadoop.hbase.io.hfile.HFile
+java_import org.apache.hadoop.hbase.io.hfile.CacheConfig
+java_import org.apache.hadoop.hbase.util.Bytes
+
+module Hbase
+  # Test class for encrypted table keymeta functionality
+  class EncryptedTableKeymetaTest < Test::Unit::TestCase
+    include TestHelpers
+
+    def setup
+      setup_hbase
+      @test_table = 'enctest'
+      @connection = $TEST_CLUSTER.connection
+    end
+
+    define_test 'Test table put/get with encryption' do
+      cust_and_namespace = "#{$CUST1_ENCODED}:*"
+      @shell.command(:enable_key_management, cust_and_namespace)
+      @shell.command(:create, @test_table, { 'NAME' => 'f', 'ENCRYPTION' => 'AES' })
+      test_table = table(@test_table)
+      test_table.put('1', 'f:a', '2')
+      puts "Added a row, now flushing table #{@test_table}"
+      command(:flush, @test_table)
+
+      table_name = TableName.valueOf(@test_table)
+      store_file_info = nil
+      $TEST_CLUSTER.getRSForFirstRegionInTable(table_name).getRegions(table_name).each do |region|
+        region.getStores.each do |store|
+          store.getStorefiles.each do |storefile|
+            store_file_info = storefile.getFileInfo
+          end
+        end
+      end
+      assert_not_nil(store_file_info)
+      hfile_info = store_file_info.getHFileInfo
+      assert_not_nil(hfile_info)
+      live_trailer = hfile_info.getTrailer
+      assert_trailer(live_trailer)
+
+      ## Disable the table to ensure that the stores are not cached.
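+      # With the region closed, the reads below must decrypt the HFile straight from disk,
+      # using the key material persisted in the trailer (encryption key, KEK metadata, KEK
+      # checksum and key namespace) together with the configured key provider.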
+      command(:disable, @test_table)
+      assert(!command(:is_enabled, @test_table))
+
+      # Open an FSDataInputStream on the path pointed to by store_file_info
+      fs = store_file_info.getFileSystem
+      fio = fs.open(store_file_info.getPath)
+      assert_not_nil(fio)
+      # Read the trailer using FixedFileTrailer
+      offline_trailer = FixedFileTrailer.readFromStream(
+        fio, fs.getFileStatus(store_file_info.getPath).getLen
+      )
+      fio.close
+      assert_trailer(offline_trailer, live_trailer)
+
+      # Verify that the encrypted HFile can also be read by a standalone, offline reader
+      reader = HFile.createReader(fs, store_file_info.getPath, CacheConfig::DISABLED, true,
+                                  $TEST_CLUSTER.getConfiguration)
+      assert_not_nil(reader)
+      offline_trailer = reader.getTrailer
+      assert_trailer(offline_trailer, live_trailer)
+      scanner = reader.getScanner($TEST_CLUSTER.getConfiguration, false, false)
+      assert_true(scanner.seekTo)
+      cell = scanner.getCell
+      assert_equal('1', Bytes.toString(CellUtil.cloneRow(cell)))
+      assert_equal('2', Bytes.toString(CellUtil.cloneValue(cell)))
+      assert_false(scanner.next)
+
+      # Confirm that the offline read fails when the configuration lacks the proper key provider
+      Encryption.clearKeyProviderCache
+      conf = Configuration.new($TEST_CLUSTER.getConfiguration)
+      conf.set(HConstants::CRYPTO_KEYPROVIDER_CONF_KEY, MockManagedKeyProvider.java_class.getName)
+      # This is expected to fail with CorruptHFileException; assert_raises returns the raised
+      # exception so its message can be checked after the block.
+      e = assert_raises(CorruptHFileException) do
+        HFile.createReader(fs, store_file_info.getPath, CacheConfig::DISABLED, true, conf)
+      end
+      assert_true(e.message.include?(
+                    "Problem reading HFile Trailer from file #{store_file_info.getPath}"
+                  ))
+      Encryption.clearKeyProviderCache
+
+      ## Re-enable the table so it can be queried again.
+      command(:enable, @test_table)
+      assert(command(:is_enabled, @test_table))
+
+      get = Get.new(Bytes.toBytes('1'))
+      res = test_table.table.get(get)
+      puts "res for row '1' and column f:a: #{res}"
+      assert_false(res.isEmpty)
+      assert_equal('2', Bytes.toString(res.getValue(Bytes.toBytes('f'), Bytes.toBytes('a'))))
+    end
+
+    def assert_trailer(offline_trailer, live_trailer = nil)
+      assert_not_nil(offline_trailer)
+      assert_not_nil(offline_trailer.getEncryptionKey)
+      assert_not_nil(offline_trailer.getKEKMetadata)
+      assert_not_nil(offline_trailer.getKEKChecksum)
+      assert_not_nil(offline_trailer.getKeyNamespace)
+
+      return unless live_trailer
+
+      assert_equal(live_trailer.getEncryptionKey, offline_trailer.getEncryptionKey)
+      assert_equal(live_trailer.getKEKMetadata, offline_trailer.getKEKMetadata)
+      assert_equal(live_trailer.getKEKChecksum, offline_trailer.getKEKChecksum)
+      assert_equal(live_trailer.getKeyNamespace, offline_trailer.getKeyNamespace)
+    end
+  end
+end
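+
+# Illustrative interactive use of the shell commands exercised above. 'Y3VzdDE=' is the
+# base64 encoding of the test custodian 'cust1'; substitute your own encoded custodian
+# and namespace:
+#   hbase> enable_key_management 'Y3VzdDE=:*'
+#   hbase> show_key_status 'Y3VzdDE=:*'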