17 | 17 | */ |
18 | 18 | package org.apache.hadoop.mapreduce.task.reduce; |
19 | 19 |
| 20 | +import static org.mockito.Mockito.mock; |
| 21 | +import org.apache.hadoop.fs.FileSystem; |
| 22 | +import org.apache.hadoop.fs.LocalDirAllocator; |
| 23 | +import org.apache.hadoop.io.compress.CompressionCodec; |
20 | 24 | import org.apache.hadoop.mapred.JobConf; |
| 25 | +import org.apache.hadoop.mapred.MapOutputFile; |
| 26 | +import org.apache.hadoop.mapred.Reporter; |
| 27 | +import org.apache.hadoop.mapred.ShuffleConsumerPlugin; |
| 28 | +import org.apache.hadoop.mapred.Task; |
21 | 29 | import org.apache.hadoop.mapred.TaskAttemptID; |
22 | 30 | import org.apache.hadoop.mapred.TaskStatus; |
| 31 | +import org.apache.hadoop.mapred.TaskUmbilicalProtocol; |
| 32 | +import org.apache.hadoop.mapred.Counters.Counter; |
| 33 | +import org.apache.hadoop.mapred.Task.CombineOutputCollector; |
23 | 34 | import org.apache.hadoop.mapreduce.JobID; |
24 | 35 | import org.apache.hadoop.mapreduce.TaskID; |
25 | 36 | import org.apache.hadoop.mapreduce.TaskType; |
@@ -66,4 +77,150 @@ public void addFetchFailedMap(TaskAttemptID mapTaskId) { |
66 | 77 | 0.0f); |
67 | 78 | Assert.assertTrue(scheduler.waitUntilDone(1)); |
68 | 79 | } |
| 80 | + |
| 81 | + @SuppressWarnings("rawtypes") |
| 82 | + @Test |
| 83 | + public <K, V> void testAggregatedTransferRate() throws Exception {
| 84 | + JobConf job = new JobConf(); |
| 85 | + job.setNumMapTasks(10); |
| 86 | + //mock creation |
| 87 | + TaskUmbilicalProtocol mockUmbilical = mock(TaskUmbilicalProtocol.class); |
| 88 | + Reporter mockReporter = mock(Reporter.class); |
| 89 | + FileSystem mockFileSystem = mock(FileSystem.class); |
| 90 | + Class<? extends org.apache.hadoop.mapred.Reducer> combinerClass = job.getCombinerClass(); |
| 91 | + @SuppressWarnings("unchecked") // needed for mocking with generics
| 92 | + CombineOutputCollector<K, V> mockCombineOutputCollector = |
| 93 | + (CombineOutputCollector<K, V>) mock(CombineOutputCollector.class); |
| 94 | + org.apache.hadoop.mapreduce.TaskAttemptID mockTaskAttemptID = |
| 95 | + mock(org.apache.hadoop.mapreduce.TaskAttemptID.class); |
| 96 | + LocalDirAllocator mockLocalDirAllocator = mock(LocalDirAllocator.class); |
| 97 | + CompressionCodec mockCompressionCodec = mock(CompressionCodec.class); |
| 98 | + Counter mockCounter = mock(Counter.class); |
| 99 | + TaskStatus mockTaskStatus = mock(TaskStatus.class); |
| 100 | + Progress mockProgress = mock(Progress.class); |
| 101 | + MapOutputFile mockMapOutputFile = mock(MapOutputFile.class); |
| 102 | + Task mockTask = mock(Task.class); |
| 103 | + @SuppressWarnings("unchecked") |
| 104 | + MapOutput<K, V> output = mock(MapOutput.class); |
| 105 | + |
| 106 | + ShuffleConsumerPlugin.Context<K, V> context = |
| 107 | + new ShuffleConsumerPlugin.Context<K, V>(mockTaskAttemptID, job, mockFileSystem, |
| 108 | + mockUmbilical, mockLocalDirAllocator, |
| 109 | + mockReporter, mockCompressionCodec, |
| 110 | + combinerClass, mockCombineOutputCollector, |
| 111 | + mockCounter, mockCounter, mockCounter, |
| 112 | + mockCounter, mockCounter, mockCounter, |
| 113 | + mockTaskStatus, mockProgress, mockProgress, |
| 114 | + mockTask, mockMapOutputFile, null); |
| 115 | + TaskStatus status = new TaskStatus() { |
| 116 | + @Override |
| 117 | + public boolean getIsMap() { |
| 118 | + return false; |
| 119 | + } |
| 120 | + @Override |
| 121 | + public void addFetchFailedMap(TaskAttemptID mapTaskId) { |
| 122 | + } |
| 123 | + }; |
| 124 | + Progress progress = new Progress(); |
| 125 | + ShuffleSchedulerImpl<K, V> scheduler = new ShuffleSchedulerImpl<K, V>(job, status, null, |
| 126 | + null, progress, context.getShuffledMapsCounter(), |
| 127 | + context.getReduceShuffleBytes(), context.getFailedShuffleCounter()); |
| 128 | + TaskAttemptID attemptID0 = new TaskAttemptID( |
| 129 | + new org.apache.hadoop.mapred.TaskID( |
| 130 | + new JobID("test",0), TaskType.MAP, 0), 0); |
| 131 | + |
| 132 | + //adding the 1st interval, 40MB from 60s to 100s |
| 133 | + long bytes = (long)40 * 1024 * 1024; |
| 134 | + scheduler.copySucceeded(attemptID0, new MapHost(null, null), bytes, 60000, 100000, output); |
| 135 | + Assert.assertEquals("copy task(attempt_test_0000_m_000000_0 succeeded at 1.00 MB/s)" |
| 136 | + + " Aggregated copy rate(1 of 10 at 1.00 MB/s)", progress.toString()); |
| 137 | + |
| 138 | + TaskAttemptID attemptID1 = new TaskAttemptID( |
| 139 | + new org.apache.hadoop.mapred.TaskID( |
| 140 | + new JobID("test",0), TaskType.MAP, 1), 1); |
| 141 | + |
| 142 | + //adding the 2nd interval before the 1st interval, 50MB from 0s to 50s |
| 143 | + bytes = (long)50 * 1024 * 1024; |
| 144 | + scheduler.copySucceeded(attemptID1, new MapHost(null, null), bytes, 0, 50000, output); |
| 145 | + Assert.assertEquals("copy task(attempt_test_0000_m_000001_1 succeeded at 1.00 MB/s)" |
| 146 | + + " Aggregated copy rate(2 of 10 at 1.00 MB/s)", progress.toString()); |
| 147 | + |
| 148 | + TaskAttemptID attemptID2 = new TaskAttemptID( |
| 149 | + new org.apache.hadoop.mapred.TaskID( |
| 150 | + new JobID("test",0), TaskType.MAP, 2), 2); |
| 151 | + |
| 152 | + //adding the 3rd interval overlapping with the 1st and the 2nd interval |
| 153 | + //110MB from 25s to 80s |
| 154 | + bytes = (long)110 * 1024 * 1024; |
| 155 | + scheduler.copySucceeded(attemptID2, new MapHost(null, null), bytes, 25000, 80000, output); |
| 156 | + Assert.assertEquals("copy task(attempt_test_0000_m_000002_2 succeeded at 2.00 MB/s)" |
| 157 | + + " Aggregated copy rate(3 of 10 at 2.00 MB/s)", progress.toString()); |
| 158 | + |
| 159 | + TaskAttemptID attemptID3 = new TaskAttemptID( |
| 160 | + new org.apache.hadoop.mapred.TaskID( |
| 161 | + new JobID("test",0), TaskType.MAP, 3), 3); |
| 162 | + |
| 163 | + //adding the 4th interval just after the 1st interval, 100MB from 100s to 300s |
| 164 | + bytes = (long)100 * 1024 * 1024; |
| 165 | + scheduler.copySucceeded(attemptID3, new MapHost(null, null), bytes, 100000, 300000, output); |
| 166 | + Assert.assertEquals("copy task(attempt_test_0000_m_000003_3 succeeded at 0.50 MB/s)" |
| 167 | + + " Aggregated copy rate(4 of 10 at 1.00 MB/s)", progress.toString()); |
| 168 | + |
| 169 | + TaskAttemptID attemptID4 = new TaskAttemptID( |
| 170 | + new org.apache.hadoop.mapred.TaskID( |
| 171 | + new JobID("test",0), TaskType.MAP, 4), 4); |
| 172 | + |
| 173 | + //adding the 5th interval after the 4th, 50MB from 350s to 400s |
| 174 | + bytes = (long)50 * 1024 * 1024; |
| 175 | + scheduler.copySucceeded(attemptID4, new MapHost(null, null), bytes, 350000, 400000, output); |
| 176 | + Assert.assertEquals("copy task(attempt_test_0000_m_000004_4 succeeded at 1.00 MB/s)" |
| 177 | + + " Aggregated copy rate(5 of 10 at 1.00 MB/s)", progress.toString()); |
| 178 | + |
| 179 | + |
| 180 | + TaskAttemptID attemptID5 = new TaskAttemptID( |
| 181 | + new org.apache.hadoop.mapred.TaskID( |
| 182 | + new JobID("test",0), TaskType.MAP, 5), 5); |
| 183 | + //adding the 6th interval after the 5th, 50MB from 450s to 500s |
| 184 | + bytes = (long)50 * 1024 * 1024; |
| 185 | + scheduler.copySucceeded(attemptID5, new MapHost(null, null), bytes, 450000, 500000, output); |
| 186 | + Assert.assertEquals("copy task(attempt_test_0000_m_000005_5 succeeded at 1.00 MB/s)" |
| 187 | + + " Aggregated copy rate(6 of 10 at 1.00 MB/s)", progress.toString()); |
| 188 | + |
| 189 | + TaskAttemptID attemptID6 = new TaskAttemptID( |
| 190 | + new org.apache.hadoop.mapred.TaskID( |
| 191 | + new JobID("test",0), TaskType.MAP, 6), 6); |
| 192 | + //adding the 7th interval between the 4th and the 5th intervals, 20MB from 320s to 340s |
| 193 | + bytes = (long)20 * 1024 * 1024; |
| 194 | + scheduler.copySucceeded(attemptID6, new MapHost(null, null), bytes, 320000, 340000, output); |
| 195 | + Assert.assertEquals("copy task(attempt_test_0000_m_000006_6 succeeded at 1.00 MB/s)" |
| 196 | + + " Aggregated copy rate(7 of 10 at 1.00 MB/s)", progress.toString()); |
| 197 | + |
| 198 | + TaskAttemptID attemptID7 = new TaskAttemptID( |
| 199 | + new org.apache.hadoop.mapred.TaskID( |
| 200 | + new JobID("test",0), TaskType.MAP, 7), 7); |
| 201 | + //adding the 8th interval overlapping with the 4th, 5th, and 7th intervals, 30MB from 290s to 350s |
| 202 | + bytes = (long)30 * 1024 * 1024; |
| 203 | + scheduler.copySucceeded(attemptID7, new MapHost(null, null), bytes, 290000, 350000, output); |
| 204 | + Assert.assertEquals("copy task(attempt_test_0000_m_000007_7 succeeded at 0.50 MB/s)" |
| 205 | + + " Aggregated copy rate(8 of 10 at 1.00 MB/s)", progress.toString()); |
| 206 | + |
| 207 | + TaskAttemptID attemptID8 = new TaskAttemptID( |
| 208 | + new org.apache.hadoop.mapred.TaskID( |
| 209 | + new JobID("test",0), TaskType.MAP, 8), 8); |
| 210 | + //adding the 9th interval filling the gap between the 5th and the 6th, 50MB from 400s to 450s |
| 211 | + bytes = (long)50 * 1024 * 1024; |
| 212 | + scheduler.copySucceeded(attemptID8, new MapHost(null, null), bytes, 400000, 450000, output); |
| 213 | + Assert.assertEquals("copy task(attempt_test_0000_m_000008_8 succeeded at 1.00 MB/s)" |
| 214 | + + " Aggregated copy rate(9 of 10 at 1.00 MB/s)", progress.toString()); |
| 215 | + |
| 216 | + TaskAttemptID attemptID9 = new TaskAttemptID( |
| 217 | + new org.apache.hadoop.mapred.TaskID( |
| 218 | + new JobID("test",0), TaskType.MAP, 9), 9); |
| 219 | + //adding the 10th interval overlapping with all intervals, 500MB from 0s to 500s |
| 220 | + bytes = (long)500 * 1024 * 1024; |
| 221 | + scheduler.copySucceeded(attemptID9, new MapHost(null, null), bytes, 0, 500000, output); |
| 222 | + Assert.assertEquals("copy task(attempt_test_0000_m_000009_9 succeeded at 1.00 MB/s)" |
| 223 | + + " Aggregated copy rate(10 of 10 at 2.00 MB/s)", progress.toString()); |
| 224 | + |
| 225 | + } |
69 | 226 | } |
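
For context on what the assertions above expect: each `copySucceeded` call reports a copy's byte count plus its start and end timestamps, and the progress string reports both the per-copy rate (bytes over that copy's own duration) and an aggregated rate. The asserted values are consistent with an aggregated rate computed as total copied bytes divided by the length of the union of all copy intervals, so idle gaps between copies are not counted. The sketch below only illustrates that arithmetic under this assumption; the names `AggregatedRateSketch`, `Copy`, and `aggregatedRateMBps` are hypothetical and not part of `ShuffleSchedulerImpl`.

```java
// Illustrative sketch only (hypothetical names, not ShuffleSchedulerImpl code):
// aggregated rate = total copied bytes / length of the union of all copy intervals,
// so time with no copy in flight does not count toward the elapsed time.
import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;

public class AggregatedRateSketch {

  /** One successful copy: bytes plus its [start, end] wall-clock interval in ms. */
  static final class Copy {
    final long bytes;
    final long startMillis;
    final long endMillis;
    Copy(long bytes, long startMillis, long endMillis) {
      this.bytes = bytes;
      this.startMillis = startMillis;
      this.endMillis = endMillis;
    }
  }

  /** Total bytes over the merged (union) duration of all copy intervals, in MB/s. */
  static double aggregatedRateMBps(List<Copy> copies) {
    List<Copy> sorted = new ArrayList<>(copies);
    sorted.sort(Comparator.comparingLong(c -> c.startMillis));
    long totalBytes = 0;
    long busyMillis = 0;
    long curStart = -1;
    long curEnd = -1;
    for (Copy c : sorted) {
      totalBytes += c.bytes;
      if (curEnd >= 0 && c.startMillis <= curEnd) {
        // Overlapping or touching the current busy run: extend it.
        curEnd = Math.max(curEnd, c.endMillis);
      } else {
        // Disjoint: close the previous run and start a new one.
        if (curEnd >= 0) {
          busyMillis += curEnd - curStart;
        }
        curStart = c.startMillis;
        curEnd = c.endMillis;
      }
    }
    if (curEnd >= 0) {
      busyMillis += curEnd - curStart;
    }
    return (totalBytes / 1024.0 / 1024.0) / (busyMillis / 1000.0);
  }

  public static void main(String[] args) {
    long mb = 1024L * 1024;
    List<Copy> copies = new ArrayList<>();
    copies.add(new Copy(40 * mb, 60000, 100000));  // 1st interval: 40MB, 60s-100s
    copies.add(new Copy(50 * mb, 0, 50000));       // 2nd interval: 50MB, 0s-50s
    copies.add(new Copy(110 * mb, 25000, 80000));  // 3rd interval: 110MB, 25s-80s
    // 200MB over a 100s union ([0s, 100s]) -> 2.00 MB/s, matching the third assertion.
    System.out.printf("%.2f MB/s%n", aggregatedRateMBps(copies));
  }
}
```

Under that reading, the final assertion also checks out: 1000MB copied in total over a 500s union ([0s, 500s]) gives the asserted 2.00 MB/s aggregate, even though the last copy itself runs at only 1.00 MB/s.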