diff --git a/runners/google-cloud-dataflow-java/src/main/java/org/apache/beam/runners/dataflow/options/DataflowPipelineWorkerPoolOptions.java b/runners/google-cloud-dataflow-java/src/main/java/org/apache/beam/runners/dataflow/options/DataflowPipelineWorkerPoolOptions.java
index bc450256b440..ec3ef7a6db28 100644
--- a/runners/google-cloud-dataflow-java/src/main/java/org/apache/beam/runners/dataflow/options/DataflowPipelineWorkerPoolOptions.java
+++ b/runners/google-cloud-dataflow-java/src/main/java/org/apache/beam/runners/dataflow/options/DataflowPipelineWorkerPoolOptions.java
@@ -218,11 +218,11 @@ public String create(PipelineOptions options) {
/**
* Specifies whether worker pools should be started with public IP addresses.
*
- *
WARNING: This feature is experimental. You must be whitelisted to use it.
+ *
WARNING: This feature is experimental. You must be allowlisted to use it.
*/
@Description(
"Specifies whether worker pools should be started with public IP addresses. WARNING:"
- + "This feature is experimental. You must be whitelisted to use it.")
+ + " This feature is experimental. You must be allowlisted to use it.")
@Experimental
@JsonIgnore
@Nullable
diff --git a/sdks/java/build-tools/src/main/resources/beam/checkstyle.xml b/sdks/java/build-tools/src/main/resources/beam/checkstyle.xml
index 181c6f085da8..b487a7a12948 100644
--- a/sdks/java/build-tools/src/main/resources/beam/checkstyle.xml
+++ b/sdks/java/build-tools/src/main/resources/beam/checkstyle.xml
@@ -88,7 +88,7 @@ page at http://checkstyle.sourceforge.net/config.html -->
diff --git a/sdks/java/core/src/main/java/org/apache/beam/sdk/coders/AvroCoder.java b/sdks/java/core/src/main/java/org/apache/beam/sdk/coders/AvroCoder.java
index a125444af728..7d8b9e9b10f8 100644
--- a/sdks/java/core/src/main/java/org/apache/beam/sdk/coders/AvroCoder.java
+++ b/sdks/java/core/src/main/java/org/apache/beam/sdk/coders/AvroCoder.java
@@ -490,7 +490,7 @@ private void doCheck(String context, TypeDescriptor<?> type, Schema schema) {
private void checkString(String context, TypeDescriptor<?> type) {
// For types that are encoded as strings, we need to make sure they're in an approved
- // whitelist. For other types that are annotated @Stringable, Avro will just use the
+ // list. For other types that are annotated @Stringable, Avro will just use the
// #toString() methods, which has no guarantees of determinism.
if (!DETERMINISTIC_STRINGABLE_CLASSES.contains(type.getRawType())) {
reportError(context, "%s may not have deterministic #toString()", type);
diff --git a/sdks/java/extensions/sql/src/test/java/org/apache/beam/sdk/extensions/sql/BeamSqlDslSqlStdOperatorsTest.java b/sdks/java/extensions/sql/src/test/java/org/apache/beam/sdk/extensions/sql/BeamSqlDslSqlStdOperatorsTest.java
index 87cc62570d47..8d8d820f7906 100644
--- a/sdks/java/extensions/sql/src/test/java/org/apache/beam/sdk/extensions/sql/BeamSqlDslSqlStdOperatorsTest.java
+++ b/sdks/java/extensions/sql/src/test/java/org/apache/beam/sdk/extensions/sql/BeamSqlDslSqlStdOperatorsTest.java
@@ -178,7 +178,7 @@ private Set<SqlOperatorId> getDeclaredOperators() {
Comparator.comparing((SqlOperatorId operator) -> operator.name()),
Comparator.comparing((SqlOperatorId operator) -> operator.kind())));
- /** Smoke test that the whitelists and utility functions actually work. */
+ /** Smoke test that the allowlists and utility functions actually work. */
@Test
@SqlOperatorTest(name = "CARDINALITY", kind = "OTHER_FUNCTION")
public void testAnnotationEquality() throws Exception {
diff --git a/sdks/java/extensions/sql/zetasql/src/main/java/org/apache/beam/sdk/extensions/sql/zetasql/SqlAnalyzer.java b/sdks/java/extensions/sql/zetasql/src/main/java/org/apache/beam/sdk/extensions/sql/zetasql/SqlAnalyzer.java
index 4ae67eba8447..cca5b452e147 100644
--- a/sdks/java/extensions/sql/zetasql/src/main/java/org/apache/beam/sdk/extensions/sql/zetasql/SqlAnalyzer.java
+++ b/sdks/java/extensions/sql/zetasql/src/main/java/org/apache/beam/sdk/extensions/sql/zetasql/SqlAnalyzer.java
@@ -19,7 +19,7 @@
import static com.google.zetasql.ZetaSQLResolvedNodeKind.ResolvedNodeKind.RESOLVED_CREATE_FUNCTION_STMT;
import static com.google.zetasql.ZetaSQLResolvedNodeKind.ResolvedNodeKind.RESOLVED_QUERY_STMT;
-import static org.apache.beam.sdk.extensions.sql.zetasql.SqlStdOperatorMappingTable.ZETASQL_BUILTIN_FUNCTION_WHITELIST;
+import static org.apache.beam.sdk.extensions.sql.zetasql.SqlStdOperatorMappingTable.ZETASQL_BUILTIN_FUNCTION_ALLOWLIST;
import static org.apache.beam.sdk.extensions.sql.zetasql.ZetaSqlCalciteTranslationUtils.toZetaType;
import com.google.common.collect.ImmutableList;
@@ -170,7 +170,7 @@ private void addBuiltinFunctionsToCatalog(SimpleCatalog catalog, AnalyzerOptions
ZetaSQLBuiltinFunctionOptions zetasqlBuiltinFunctionOptions =
new ZetaSQLBuiltinFunctionOptions(options.getLanguageOptions());
- ZETASQL_BUILTIN_FUNCTION_WHITELIST.forEach(
+ ZETASQL_BUILTIN_FUNCTION_ALLOWLIST.forEach(
zetasqlBuiltinFunctionOptions::includeFunctionSignatureId);
catalog.addZetaSQLFunctions(zetasqlBuiltinFunctionOptions);
diff --git a/sdks/java/extensions/sql/zetasql/src/main/java/org/apache/beam/sdk/extensions/sql/zetasql/SqlStdOperatorMappingTable.java b/sdks/java/extensions/sql/zetasql/src/main/java/org/apache/beam/sdk/extensions/sql/zetasql/SqlStdOperatorMappingTable.java
index d8a0c728d22d..b89e0530349c 100644
--- a/sdks/java/extensions/sql/zetasql/src/main/java/org/apache/beam/sdk/extensions/sql/zetasql/SqlStdOperatorMappingTable.java
+++ b/sdks/java/extensions/sql/zetasql/src/main/java/org/apache/beam/sdk/extensions/sql/zetasql/SqlStdOperatorMappingTable.java
@@ -29,7 +29,7 @@
/** SqlStdOperatorMappingTable. */
@Internal
public class SqlStdOperatorMappingTable {
- static final List<FunctionSignatureId> ZETASQL_BUILTIN_FUNCTION_WHITELIST =
+ static final List<FunctionSignatureId> ZETASQL_BUILTIN_FUNCTION_ALLOWLIST =
ImmutableList.of(
FunctionSignatureId.FN_AND,
FunctionSignatureId.FN_ANY_VALUE,
diff --git a/sdks/java/io/hadoop-format/src/test/resources/cassandra.yaml b/sdks/java/io/hadoop-format/src/test/resources/cassandra.yaml
index ca1e48fd8eed..1c6e7c3e4ac7 100644
--- a/sdks/java/io/hadoop-format/src/test/resources/cassandra.yaml
+++ b/sdks/java/io/hadoop-format/src/test/resources/cassandra.yaml
@@ -63,7 +63,7 @@ num_tokens: 1
# See http://wiki.apache.org/cassandra/HintedHandoff
# May either be "true" or "false" to enable globally
hinted_handoff_enabled: true
-# When hinted_handoff_enabled is true, a black list of data centers that will not
+# When hinted_handoff_enabled is true, a blocklist of data centers that will not
# perform hinted handoff
#hinted_handoff_disabled_datacenters:
# - DC1
diff --git a/sdks/java/io/kafka/src/main/java/org/apache/beam/sdk/io/kafka/KafkaIO.java b/sdks/java/io/kafka/src/main/java/org/apache/beam/sdk/io/kafka/KafkaIO.java
index 5475785b657a..b3f4b9fd3077 100644
--- a/sdks/java/io/kafka/src/main/java/org/apache/beam/sdk/io/kafka/KafkaIO.java
+++ b/sdks/java/io/kafka/src/main/java/org/apache/beam/sdk/io/kafka/KafkaIO.java
@@ -1258,9 +1258,9 @@ public WriteRecords withPublishTimestampFunction(
* transform ties checkpointing semantics in compatible Beam runners and transactions in Kafka
* (version 0.11+) to ensure a record is written only once. As the implementation relies on
* runners checkpoint semantics, not all the runners are compatible. The sink throws an
- * exception during initialization if the runner is not whitelisted. Flink runner is one of the
- * runners whose checkpoint semantics are not compatible with current implementation (hope to
- * provide a solution in near future). Dataflow runner and Spark runners are whitelisted as
+ * exception during initialization if the runner is not explicitly allowed. Flink runner is one
+ * of the runners whose checkpoint semantics are not compatible with current implementation
+ * (hope to provide a solution in near future). Dataflow runner and Spark runners are
* compatible.
*
* <p>Note on performance: Exactly-once sink involves two shuffles of the records. In addition
@@ -1337,9 +1337,9 @@ public void validate(PipelineOptions options) {
}
throw new UnsupportedOperationException(
runner
- + " is not whitelisted among runners compatible with Kafka exactly-once sink. "
+ + " is not a runner known to be compatible with Kafka exactly-once sink. "
+ "This implementation of exactly-once sink relies on specific checkpoint guarantees. "
- + "Only the runners with known to have compatible checkpoint semantics are whitelisted.");
+ + "Only runners known to have compatible checkpoint semantics are allowed.");
}
}