From 024f9737178624065d0ceada4d3b9fd5aac80341 Mon Sep 17 00:00:00 2001
From: Eric Pugh
Date: Sat, 14 Dec 2024 06:35:57 -0500
Subject: [PATCH] SOLR-17579: Refactoring suggested by IDE in ReplicationHandler and tests (#2893)

* Refactoring suggested by IDE.

* Typos, some dead methods and dead variables.
---
 solr/CHANGES.txt                              |  3 +-
 .../org/apache/solr/handler/IndexFetcher.java | 57 ++++++-------------
 .../solr/handler/ReplicationHandler.java      | 35 ++++--------
 .../solr/handler/TestReplicationHandler.java  | 20 +++----
 .../handler/TestReplicationHandlerBackup.java |  2 +-
 5 files changed, 40 insertions(+), 77 deletions(-)

diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index 2eefba28dbb..a5f6c24d1d1 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -164,7 +164,8 @@ Dependency Upgrades
 
 Other Changes
 ---------------------
-(No changes)
+* SOLR-17579: Remove unused code and other refactorings in ReplicationHandler and tests. Removed unused public
+  LOCAL_ACTIVITY_DURING_REPLICATION variable. (Eric Pugh)
 
 ================== 9.8.0 ==================
 New Features
diff --git a/solr/core/src/java/org/apache/solr/handler/IndexFetcher.java b/solr/core/src/java/org/apache/solr/handler/IndexFetcher.java
index a933bb6e1d6..78244fdaedf 100644
--- a/solr/core/src/java/org/apache/solr/handler/IndexFetcher.java
+++ b/solr/core/src/java/org/apache/solr/handler/IndexFetcher.java
@@ -187,8 +187,6 @@ public class IndexFetcher {
 
   private Integer soTimeout;
 
-  private boolean downloadTlogFiles = false;
-
   private boolean skipCommitOnLeaderVersionZero = true;
 
   private boolean clearLocalIndexFirst = false;
@@ -209,7 +207,7 @@ public static class IndexFetchResult {
        new IndexFetchResult("Local index commit is already in sync with peer", true, null);
 
     public static final IndexFetchResult INDEX_FETCH_FAILURE =
-        new IndexFetchResult("Fetching lastest index is failed", false, null);
+        new IndexFetchResult("Fetching latest index is failed", false, null);
     public static final IndexFetchResult INDEX_FETCH_SUCCESS =
         new IndexFetchResult("Fetching latest index is successful", true, null);
     public static final IndexFetchResult LOCK_OBTAIN_FAILED =
@@ -224,8 +222,6 @@ public static class IndexFetchResult {
     public static final IndexFetchResult PEER_INDEX_COMMIT_DELETED =
         new IndexFetchResult(
             "No files to download because IndexCommit in peer was deleted", false, null);
-    public static final IndexFetchResult LOCAL_ACTIVITY_DURING_REPLICATION =
-        new IndexFetchResult("Local index modification during replication", false, null);
     public static final IndexFetchResult EXPECTING_NON_LEADER =
         new IndexFetchResult("Replicating from leader but I'm the shard leader", false, null);
     public static final IndexFetchResult LEADER_IS_NOT_ACTIVE =
@@ -402,7 +398,7 @@ IndexFetchResult fetchLatestIndex(boolean forceReplication)
   }
 
   /**
-   * This command downloads all the necessary files from leader to install a index commit point.
+   * This command downloads all the necessary files from leader to install an index commit point.
    * Only changed files are downloaded. It also downloads the conf files (if they are modified).
    *
    * @param forceReplication force a replication in all cases
@@ -670,7 +666,7 @@ IndexFetchResult fetchLatestIndex(boolean forceReplication, boolean forceCoreRel
           latestGeneration);
       final long timeTakenSeconds = getReplicationTimeElapsed();
       final Long bytesDownloadedPerSecond =
-          (timeTakenSeconds != 0 ? Long.valueOf(bytesDownloaded / timeTakenSeconds) : null);
+          (timeTakenSeconds != 0 ? bytesDownloaded / timeTakenSeconds : null);
       log.info(
           "Total time taken for download (fullCopy={},bytesDownloaded={}) : {} secs ({} bytes/sec) to {}",
           isFullCopyNeeded,
@@ -798,8 +794,7 @@ private void cleanup(
       Directory indexDir,
       boolean deleteTmpIdxDir,
       File tmpTlogDir,
-      boolean successfulInstall)
-      throws IOException {
+      boolean successfulInstall) {
     try {
       if (!successfulInstall) {
         try {
@@ -847,7 +842,9 @@ private void cleanup(
       log.error("Error releasing indexDir", e);
     }
     try {
-      if (tmpTlogDir != null) delTree(tmpTlogDir);
+      if (tmpTlogDir != null) {
+        delTree(tmpTlogDir);
+      }
     } catch (Exception e) {
       log.error("Error deleting tmpTlogDir", e);
     }
@@ -1368,7 +1365,7 @@ private static boolean slowFileExists(Directory dir, String fileName) throws IOE
    * All the files which are common between leader and follower must have same size and same
    * checksum else we assume they are not compatible (stale).
    *
-   * @return true if the index stale and we need to download a fresh copy, false otherwise.
+   * @return true if the index is stale, and we need to download a fresh copy, false otherwise.
    * @throws IOException if low level io error
    */
   private boolean isIndexStale(Directory dir) throws IOException {
@@ -1516,7 +1513,7 @@ private void copyTmpConfFiles2Conf(Path tmpconfDir) {
   /**
    * The tlog files are moved from the tmp dir to the tlog dir as an atomic filesystem operation. A
    * backup of the old directory is maintained. If the directory move fails, it will try to revert
-   * back the original tlog directory.
+   * the original tlog directory.
    */
   private boolean copyTmpTlogFiles2Tlog(File tmpTlogDir) {
     Path tlogDir =
@@ -1540,11 +1537,11 @@ private boolean copyTmpTlogFiles2Tlog(File tmpTlogDir) {
     } catch (IOException e) {
       log.error("Unable to rename: {} to: {}", src, tlogDir, e);
 
-      // In case of error, try to revert back the original tlog directory
+      // In case of error, try to revert the original tlog directory
       try {
         Files.move(backupTlogDir, tlogDir, StandardCopyOption.ATOMIC_MOVE);
       } catch (IOException e2) {
-        // bad, we were not able to revert back the original tlog directory
+        // bad, we were not able to revert the original tlog directory
         throw new SolrException(
             SolrException.ErrorCode.SERVER_ERROR,
             "Unable to rename: " + backupTlogDir + " to: " + tlogDir);
@@ -1598,23 +1595,6 @@ private Collection<Map<String, Object>> getModifiedConfFiles(
     return nameVsFile.isEmpty() ? Collections.emptyList() : nameVsFile.values();
   }
 
-  /**
-   * This simulates File.delete exception-wise, since this class has some strange behavior with it.
-   * The only difference is it returns null on success, throws SecurityException on
-   * SecurityException, otherwise returns Throwable preventing deletion (instead of false), for
-   * additional information.
-   */
-  static Throwable delete(File file) {
-    try {
-      Files.delete(file.toPath());
-      return null;
-    } catch (SecurityException e) {
-      throw e;
-    } catch (Throwable other) {
-      return other;
-    }
-  }
-
   static boolean delTree(File dir) {
     try {
       org.apache.lucene.util.IOUtils.rm(dir.toPath());
@@ -1730,8 +1710,7 @@ private class FileFetcher {
         Map<String, Object> fileDetails,
         String saveAs,
         String solrParamOutput,
-        long latestGen)
-        throws IOException {
+        long latestGen) {
       this.file = file;
       this.fileName = (String) fileDetails.get(NAME);
       this.size = (Long) fileDetails.get(SIZE);
@@ -1781,7 +1760,7 @@ private void fetch() throws Exception {
         }
       } finally {
         cleanup();
-        // if cleanup succeeds . The file is downloaded fully. do an fsync
+        // if cleanup succeeds, and the file is downloaded fully, then do a fsync.
         fsyncService.execute(
             () -> {
               try {
@@ -1885,8 +1864,8 @@ private int fetchPackets(FastInputStream fis) throws Exception {
     }
 
     /**
-     * The webcontainer flushes the data only after it fills the buffer size. So, all data has to be
-     * read as readFully() other wise it fails. So read everything as bytes and then extract an
+     * The web container flushes the data only after it fills the buffer size. So, all data has to
+     * be read as readFully() otherwise it fails. So read everything as bytes and then extract an
      * integer out of it
      */
     private int readInt(byte[] b) {
@@ -1953,7 +1932,7 @@ private FastInputStream getStream() throws IOException {
       }
       // wt=filestream this is a custom protocol
       params.set(CommonParams.WT, FILE_STREAM);
-      // This happen if there is a failure there is a retry. the offset= ensures
+      // This happens if there is a failure there is a retry. the offset= ensures
       // that the server starts from the offset
      if (bytesDownloaded > 0) {
         params.set(OFFSET, Long.toString(bytesDownloaded));
@@ -2046,16 +2025,14 @@ protected class DirectoryFileFetcher extends FileFetcher {
   }
 
   private static class LocalFsFile implements FileInterface {
-    private File copy2Dir;
 
     FileChannel fileChannel;
     private FileOutputStream fileOutputStream;
     File file;
 
     LocalFsFile(File dir, String saveAs) throws IOException {
-      this.copy2Dir = dir;
-      this.file = new File(copy2Dir, saveAs);
+      this.file = new File(dir, saveAs);
 
       File parentDir = this.file.getParentFile();
       if (!parentDir.exists()) {
diff --git a/solr/core/src/java/org/apache/solr/handler/ReplicationHandler.java b/solr/core/src/java/org/apache/solr/handler/ReplicationHandler.java
index d059ad38d53..a72fd4363e8 100644
--- a/solr/core/src/java/org/apache/solr/handler/ReplicationHandler.java
+++ b/solr/core/src/java/org/apache/solr/handler/ReplicationHandler.java
@@ -42,7 +42,6 @@
 import java.nio.file.Path;
 import java.nio.file.Paths;
 import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.Date;
@@ -153,8 +152,6 @@ public class ReplicationHandler extends RequestHandlerBase
   private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
   SolrCore core;
 
-  private volatile boolean closed = false;
-
   @Override
   public Name getPermissionName(AuthorizationContext request) {
     return Name.READ_PERM;
   }
@@ -227,8 +224,6 @@ public String toString() {
 
   private volatile long executorStartTime;
 
-  private int numTimesReplicated = 0;
-
   private final Map<String, FileInfo> confFileInfoCache = new HashMap<>();
 
   private Long reserveCommitDuration = readIntervalMs("00:00:10");
@@ -323,8 +318,7 @@ public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throw
    * @see IndexFetcher.LocalFsFileFetcher
    * @see IndexFetcher.DirectoryFileFetcher
    */
-  private void getFileStream(SolrParams solrParams, SolrQueryResponse rsp, SolrQueryRequest req)
-      throws IOException {
+  private void getFileStream(SolrParams solrParams, SolrQueryResponse rsp, SolrQueryRequest req) {
     final CoreReplication coreReplicationAPI = new CoreReplication(core, req, rsp);
     String fileName;
     String dirType;
@@ -800,14 +794,6 @@ private Date getNextScheduledExecTime() {
     return nextTime;
   }
 
-  int getTimesReplicatedSinceStartup() {
-    return numTimesReplicated;
-  }
-
-  void setTimesReplicatedSinceStartup() {
-    numTimesReplicated++;
-  }
-
   @Override
   public Category getCategory() {
     return Category.REPLICATION;
@@ -1043,7 +1029,7 @@ private NamedList<Object> getReplicationDetails(
         follower.add("replicationStartTime", replicationStartTimeStamp.toString());
       }
       long elapsed = fetcher.getReplicationTimeElapsed();
-      follower.add("timeElapsed", String.valueOf(elapsed) + "s");
+      follower.add("timeElapsed", elapsed + "s");
 
       if (bytesDownloaded > 0)
         estimatedTimeRemaining =
@@ -1108,13 +1094,13 @@ private Object formatVal(String key, Properties props, Class clzz) {
     if (s == null || s.trim().length() == 0) return null;
     if (clzz == Date.class) {
       try {
-        Long l = Long.parseLong(s);
+        long l = Long.parseLong(s);
         return new Date(l).toString();
       } catch (NumberFormatException e) {
         return null;
       }
     } else if (clzz == List.class) {
-      String ss[] = s.split(",");
+      String[] ss = s.split(",");
       List<String> l = new ArrayList<>();
       for (String s1 : ss) {
         l.add(new Date(Long.parseLong(s1)).toString());
@@ -1272,11 +1258,11 @@ public void inform(SolrCore core) {
     if (enableLeader) {
       includeConfFiles = (String) leader.get(CONF_FILES);
       if (includeConfFiles != null && includeConfFiles.trim().length() > 0) {
-        List<String> files = Arrays.asList(includeConfFiles.split(","));
+        String[] files = includeConfFiles.split(",");
         for (String file : files) {
           if (file.trim().length() == 0) continue;
           String[] strs = file.trim().split(":");
-          // if there is an alias add it or it is null
+          // if there is an alias add it, or it is null
           confFileNameAlias.add(strs[0], strs.length > 1 ? strs[1] : null);
         }
         log.info("Replication enabled for following config files: {}", includeConfFiles);
@@ -1347,7 +1333,7 @@ public void inform(SolrCore core) {
       }
     }
 
-    // ensure the writer is init'd so that we have a list of commit points
+    // ensure the writer is initialized so that we have a list of commit points
     RefCounted<IndexWriter> iw =
         core.getUpdateHandler().getSolrCoreState().getIndexWriter(core);
     iw.decref();
@@ -1532,7 +1518,8 @@ private Long readIntervalNs(String interval) {
   public static final String FETCH_FROM_LEADER = "fetchFromLeader";
 
   // in case of TLOG replica, if leaderVersion = zero, don't do commit
-  // otherwise updates from current tlog won't copied over properly to the new tlog, leading to data
+  // otherwise updates from current tlog won't be copied over properly to the new tlog, leading to
+  // data
   // loss
   // don't commit on leader version zero for PULL replicas as PULL should only get its index
   // state from leader
@@ -1576,8 +1563,6 @@ private Long readIntervalNs(String interval) {
 
   public static final String ALIAS = "alias";
 
-  public static final String CONF_CHECKSUM = "confchecksum";
-
   public static final String CONF_FILES = "confFiles";
 
   public static final String REPLICATE_AFTER = "replicateAfter";
@@ -1601,7 +1586,7 @@ private Long readIntervalNs(String interval) {
   /**
    * Boolean param for tests that can be specified when using {@link #CMD_FETCH_INDEX} to force the
    * current request to block until the fetch is complete. <b>NOTE:</b> This param is not advised
-   * for non-test code, since the duration of the fetch for non-trivial indexes will likeley cause
+   * for non-test code, since the duration of the fetch for non-trivial indexes will likely cause
    * the request to time out.
    *
    * @lucene.internal
diff --git a/solr/core/src/test/org/apache/solr/handler/TestReplicationHandler.java b/solr/core/src/test/org/apache/solr/handler/TestReplicationHandler.java
index b30e20f70bf..3096ec10bf4 100644
--- a/solr/core/src/test/org/apache/solr/handler/TestReplicationHandler.java
+++ b/solr/core/src/test/org/apache/solr/handler/TestReplicationHandler.java
@@ -304,8 +304,8 @@ public void doTestDetails() throws Exception {
     // check details on the follower a couple of times before & after fetching
     for (int i = 0; i < 3; i++) {
       NamedList<Object> details = getDetails(followerClient);
-      assertNotNull(i + ": " + details);
-      assertNotNull(i + ": " + details.toString(), details.get("follower"));
+      assertNotNull(i + ": " + details, details);
+      assertNotNull(i + ": " + details, details.get("follower"));
 
       if (i > 0) {
         rQuery(i, "*:*", followerClient);
@@ -459,7 +459,7 @@ public void doTestReplicateAfterWrite2Follower() throws Exception {
     index(followerClient, "id", 555, "name", "name = " + 555);
     followerClient.commit(true, true);
 
-    // this doc is added to follower so it should show an item w/ that result
+    // this doc is added to follower, so it should show an item w/ that result
     assertEquals(1, numFound(rQuery(1, "id:555", followerClient)));
 
     // Let's fetch the index rather than rely on the polling.
@@ -528,7 +528,7 @@ public void doTestIndexAndConfigReplication() throws Exception {
 
     followerJetty.stop();
 
-    // setup an sub directory /foo/ in order to force subdir file replication
+    // set up a subdirectory /foo/ in order to force subdir file replication
     File leaderFooDir = new File(leader.getConfDir() + File.separator + "foo");
     File leaderBarFile = new File(leaderFooDir, "bar.txt");
     assertTrue("could not make dir " + leaderFooDir, leaderFooDir.mkdirs());
@@ -551,7 +551,7 @@ public void doTestIndexAndConfigReplication() throws Exception {
     followerQueryRsp = rQuery(1, "*:*", followerClient);
     assertVersions(leaderClient, followerClient);
     SolrDocument d = ((SolrDocumentList) followerQueryRsp.get("response")).get(0);
-    assertEquals("newname = 2000", (String) d.getFieldValue("newname"));
+    assertEquals("newname = 2000", d.getFieldValue("newname"));
 
     assertTrue(followerFooDir.isDirectory());
     assertTrue(followerBarFile.exists());
@@ -596,8 +596,8 @@ public void doTestStopPoll() throws Exception {
     // get docs from leader and check if number is equal to leader
     assertEquals(nDocs + 1, numFound(rQuery(nDocs + 1, "*:*", leaderClient)));
 
-    // NOTE: this test is wierd, we want to verify it DOESNT replicate...
-    // for now, add a sleep for this.., but the logic is wierd.
+    // NOTE: this test is weird, we want to verify it DOESN'T replicate...
+    // for now, add a sleep for this... but the logic is weird.
     Thread.sleep(3000);
 
     // get docs from follower and check if number is not equal to leader; polling is disabled
@@ -1583,7 +1583,7 @@ public void testEmptyBackups() throws Exception {
 
     index(leaderClient, "id", "1", "name", "foo");
 
-    { // second backup w/uncommited doc
+    { // second backup w/uncommitted doc
       final String backupName = "empty_backup2";
       final GenericSolrRequest req =
           new GenericSolrRequest(
@@ -1695,7 +1695,7 @@ private Date watchCoreStartAt(JettySolrRunner jettySolrRunner, final Date min)
           return startTime;
         }
       } catch (SolrException e) {
-        // workarround for SOLR-4668
+        // workaround for SOLR-4668
         if (500 != e.code()) {
           throw e;
         } // else server possibly from the core reload in progress...
@@ -1705,7 +1705,7 @@ private Date watchCoreStartAt(JettySolrRunner jettySolrRunner, final Date min)
 
       Thread.sleep(sleepInterval);
     }
     fail("timed out waiting for collection1 startAt time to exceed: " + min);
-    return min; // compilation neccessity
+    return min; // compilation necessity
   }
 }
diff --git a/solr/core/src/test/org/apache/solr/handler/TestReplicationHandlerBackup.java b/solr/core/src/test/org/apache/solr/handler/TestReplicationHandlerBackup.java
index 418ce1d6551..032ae0c9c3e 100644
--- a/solr/core/src/test/org/apache/solr/handler/TestReplicationHandlerBackup.java
+++ b/solr/core/src/test/org/apache/solr/handler/TestReplicationHandlerBackup.java
@@ -238,7 +238,7 @@ public void doTestBackup() throws Exception {
     }
   }
 
-  private void testDeleteNamedBackup(String backupNames[]) throws Exception {
+  private void testDeleteNamedBackup(String[] backupNames) throws Exception {
     final BackupStatusChecker backupStatus =
         new BackupStatusChecker(leaderClient, "/" + DEFAULT_TEST_CORENAME + "/replication");
     for (int i = 0; i < 2; i++) {
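
Note for reviewers: the javadoc cleaned up in copyTmpTlogFiles2Tlog above describes a
move-with-backup-and-revert pattern. A minimal standalone sketch of that pattern follows;
the class and method names (AtomicDirSwap, swap) and the paths are hypothetical
illustrations, not the handler's actual code.

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;

public class AtomicDirSwap {
  /**
   * Replaces {@code target} with {@code incoming}, keeping {@code backup} so the original
   * can be restored if the swap fails. Note that ATOMIC_MOVE is only guaranteed when source
   * and destination are on the same filesystem.
   */
  static void swap(Path incoming, Path target, Path backup) throws IOException {
    // Step 1: move the current directory aside as a backup.
    Files.move(target, backup, StandardCopyOption.ATOMIC_MOVE);
    try {
      // Step 2: move the new directory into place.
      Files.move(incoming, target, StandardCopyOption.ATOMIC_MOVE);
    } catch (IOException e) {
      // Step 3: on failure, try to revert the original directory. As in the handler,
      // this revert can itself fail; copyTmpTlogFiles2Tlog escalates to a SolrException
      // in that case.
      Files.move(backup, target, StandardCopyOption.ATOMIC_MOVE);
      throw e;
    }
  }
}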