
Commit c6793dd

HDFS-8681. BlockScanner is incorrectly disabled by default. (Contributed by Arpit Agarwal)
1 parent 3dfa816 commit c6793dd

5 files changed: +41 -10 lines changed

hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

Lines changed: 3 additions & 0 deletions
@@ -1113,6 +1113,9 @@ Release 2.7.1 - UNRELEASED
     HDFS-8656. Preserve compatibility of ClientProtocol#rollingUpgrade after
     finalization. (wang)
 
+    HDFS-8681. BlockScanner is incorrectly disabled by default.
+    (Arpit Agarwal)
+
 Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java

Lines changed: 1 addition & 1 deletion
@@ -378,7 +378,7 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String DFS_DATANODE_MAX_RECEIVER_THREADS_KEY = "dfs.datanode.max.transfer.threads";
   public static final int DFS_DATANODE_MAX_RECEIVER_THREADS_DEFAULT = 4096;
   public static final String DFS_DATANODE_SCAN_PERIOD_HOURS_KEY = "dfs.datanode.scan.period.hours";
-  public static final int DFS_DATANODE_SCAN_PERIOD_HOURS_DEFAULT = 0;
+  public static final int DFS_DATANODE_SCAN_PERIOD_HOURS_DEFAULT = 21 * 24;  // 3 weeks.
   public static final String DFS_BLOCK_SCANNER_VOLUME_BYTES_PER_SECOND = "dfs.block.scanner.volume.bytes.per.second";
   public static final long DFS_BLOCK_SCANNER_VOLUME_BYTES_PER_SECOND_DEFAULT = 1048576L;
   public static final String DFS_DATANODE_TRANSFERTO_ALLOWED_KEY = "dfs.datanode.transferTo.allowed";
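
Note: the new default works out to 21 * 24 = 504 hours, i.e. each block is scanned roughly once every three weeks. A minimal standalone sketch (the class name is illustrative, not part of the patch) of how that constant maps to the millisecond period the scanner works with:

import java.util.concurrent.TimeUnit;

public class ScanPeriodDefault {
  // Mirrors the new constant: 21 days expressed in hours.
  static final int DFS_DATANODE_SCAN_PERIOD_HOURS_DEFAULT = 21 * 24;  // 504

  public static void main(String[] args) {
    long periodMs = TimeUnit.MILLISECONDS.convert(
        DFS_DATANODE_SCAN_PERIOD_HOURS_DEFAULT, TimeUnit.HOURS);
    // 504 hours == 1,814,400,000 ms.
    System.out.println("Default scan period: " + periodMs + " ms");
  }
}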

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockScanner.java

Lines changed: 30 additions & 6 deletions
@@ -115,6 +115,34 @@ private static long getUnitTestLong(Configuration conf, String key,
       }
     }
 
+    /**
+     * Determine the configured block scanner interval.
+     *
+     * For compatibility with prior releases of HDFS, if the
+     * configured value is zero then the scan period is
+     * set to 3 weeks.
+     *
+     * If the configured value is less than zero then the scanner
+     * is disabled.
+     *
+     * @param conf Configuration object.
+     * @return block scan period in milliseconds.
+     */
+    private static long getConfiguredScanPeriodMs(Configuration conf) {
+      long tempScanPeriodMs = getUnitTestLong(
+          conf, INTERNAL_DFS_DATANODE_SCAN_PERIOD_MS,
+          TimeUnit.MILLISECONDS.convert(conf.getLong(
+              DFS_DATANODE_SCAN_PERIOD_HOURS_KEY,
+              DFS_DATANODE_SCAN_PERIOD_HOURS_DEFAULT), TimeUnit.HOURS));
+
+      if (tempScanPeriodMs == 0) {
+        tempScanPeriodMs = TimeUnit.MILLISECONDS.convert(
+            DFS_DATANODE_SCAN_PERIOD_HOURS_DEFAULT, TimeUnit.HOURS);
+      }
+
+      return tempScanPeriodMs;
+    }
+
     @SuppressWarnings("unchecked")
     Conf(Configuration conf) {
       this.targetBytesPerSec = Math.max(0L, conf.getLong(
@@ -123,11 +151,7 @@ private static long getUnitTestLong(Configuration conf, String key,
       this.maxStalenessMs = Math.max(0L, getUnitTestLong(conf,
           INTERNAL_DFS_BLOCK_SCANNER_MAX_STALENESS_MS,
           INTERNAL_DFS_BLOCK_SCANNER_MAX_STALENESS_MS_DEFAULT));
-      this.scanPeriodMs = Math.max(0L,
-          getUnitTestLong(conf, INTERNAL_DFS_DATANODE_SCAN_PERIOD_MS,
-              TimeUnit.MILLISECONDS.convert(conf.getLong(
-                  DFS_DATANODE_SCAN_PERIOD_HOURS_KEY,
-                  DFS_DATANODE_SCAN_PERIOD_HOURS_DEFAULT), TimeUnit.HOURS)));
+      this.scanPeriodMs = getConfiguredScanPeriodMs(conf);
       this.cursorSaveMs = Math.max(0L, getUnitTestLong(conf,
           INTERNAL_DFS_BLOCK_SCANNER_CURSOR_SAVE_INTERVAL_MS,
           INTERNAL_DFS_BLOCK_SCANNER_CURSOR_SAVE_INTERVAL_MS_DEFAULT));
@@ -159,7 +183,7 @@ public BlockScanner(DataNode datanode, Configuration conf) {
    * no threads will start.
    */
   public boolean isEnabled() {
-    return (conf.scanPeriodMs) > 0 && (conf.targetBytesPerSec > 0);
+    return (conf.scanPeriodMs > 0) && (conf.targetBytesPerSec > 0);
   }
 
   /**
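
Taken together, the patch gives dfs.datanode.scan.period.hours three cases: a positive value is the per-block scan period in hours, zero now falls back to the 504-hour default, and a negative value disables the scanner; isEnabled() additionally requires a positive bytes-per-second throttle. A hedged, self-contained sketch of that interpretation (ScanPeriodSemantics and resolveScanPeriodMs are illustrative names, not HDFS APIs):

import java.util.concurrent.TimeUnit;

class ScanPeriodSemantics {
  static final long DEFAULT_HOURS = 21 * 24;  // 504 hours, i.e. 3 weeks

  // Illustrative helper mirroring the behaviour of getConfiguredScanPeriodMs above.
  static long resolveScanPeriodMs(long configuredHours) {
    if (configuredHours == 0) {
      // Zero now means "use the default", not "disable".
      return TimeUnit.HOURS.toMillis(DEFAULT_HOURS);
    }
    // Negative values yield a non-positive period, which disables the scanner.
    return TimeUnit.HOURS.toMillis(configuredHours);
  }

  // Same shape as the corrected isEnabled() check.
  static boolean isScannerEnabled(long scanPeriodMs, long targetBytesPerSec) {
    return (scanPeriodMs > 0) && (targetBytesPerSec > 0);
  }

  public static void main(String[] args) {
    long throttle = 1048576L;  // default dfs.block.scanner.volume.bytes.per.second
    System.out.println(isScannerEnabled(resolveScanPeriodMs(0), throttle));   // true: default period
    System.out.println(isScannerEnabled(resolveScanPeriodMs(-1), throttle));  // false: disabled
    System.out.println(isScannerEnabled(resolveScanPeriodMs(12), throttle));  // true: 12-hour period
  }
}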

hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml

Lines changed: 6 additions & 3 deletions
@@ -1071,11 +1071,14 @@
 
 <property>
   <name>dfs.datanode.scan.period.hours</name>
-  <value>0</value>
+  <value>504</value>
   <description>
-    If this is 0 or negative, the DataNode's block scanner will be
-    disabled. If this is positive, the DataNode will not scan any
+    If this is positive, the DataNode will not scan any
     individual block more than once in the specified scan period.
+    If this is negative, the block scanner is disabled.
+    If this is set to zero, then the default value of 504 hours
+    or 3 weeks is used. Prior versions of HDFS incorrectly documented
+    that setting this key to zero will disable the block scanner.
   </description>
 </property>
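
Operators who previously set this key to 0 to turn the scanner off should now use a negative value instead. A small illustrative example of setting the key programmatically (DisableBlockScannerExample is a hypothetical class; the key and default constants are the ones changed above, and the same value can be set in hdfs-site.xml):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;

public class DisableBlockScannerExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Any negative value disables the block scanner under the new semantics;
    // 0 no longer disables it but falls back to the 504-hour default instead.
    conf.setLong(DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, -1);
    System.out.println("scan period (hours): " + conf.getLong(
        DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY,
        DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_DEFAULT));  // prints -1
  }
}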

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java

Lines changed: 1 addition & 0 deletions
@@ -274,6 +274,7 @@ public void testRemoveNewlyAddedVolume() throws IOException {
   public void testChangeVolumeWithRunningCheckDirs() throws IOException {
     RoundRobinVolumeChoosingPolicy<FsVolumeImpl> blockChooser =
         new RoundRobinVolumeChoosingPolicy<>();
+    conf.setLong(DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, -1);
     final BlockScanner blockScanner = new BlockScanner(datanode, conf);
     final FsVolumeList volumeList = new FsVolumeList(
         Collections.<VolumeFailureInfo>emptyList(), blockScanner, blockChooser);
