SOLR-17579: Refactoring suggested by IDE in ReplicationHandler and tests (#2893)

* Refactoring suggested by IDE.
* Typos, some dead methods and dead variables.
epugh authored Dec 14, 2024
1 parent b41c7dd commit 024f973
Showing 5 changed files with 40 additions and 77 deletions.
3 changes: 2 additions & 1 deletion solr/CHANGES.txt
@@ -164,7 +164,8 @@ Dependency Upgrades

Other Changes
---------------------
(No changes)
* SOLR-17579: Remove unused code and other refactorings in ReplicationHandler and tests. Removed unused public
LOCAL_ACTIVITY_DURING_REPLICATION variable. (Eric Pugh)

================== 9.8.0 ==================
New Features
57 changes: 17 additions & 40 deletions solr/core/src/java/org/apache/solr/handler/IndexFetcher.java
@@ -187,8 +187,6 @@ public class IndexFetcher {

private Integer soTimeout;

private boolean downloadTlogFiles = false;

private boolean skipCommitOnLeaderVersionZero = true;

private boolean clearLocalIndexFirst = false;
@@ -209,7 +207,7 @@ public static class IndexFetchResult {
new IndexFetchResult("Local index commit is already in sync with peer", true, null);

public static final IndexFetchResult INDEX_FETCH_FAILURE =
new IndexFetchResult("Fetching lastest index is failed", false, null);
new IndexFetchResult("Fetching latest index is failed", false, null);
public static final IndexFetchResult INDEX_FETCH_SUCCESS =
new IndexFetchResult("Fetching latest index is successful", true, null);
public static final IndexFetchResult LOCK_OBTAIN_FAILED =
@@ -224,8 +222,6 @@ public static class IndexFetchResult {
public static final IndexFetchResult PEER_INDEX_COMMIT_DELETED =
new IndexFetchResult(
"No files to download because IndexCommit in peer was deleted", false, null);
public static final IndexFetchResult LOCAL_ACTIVITY_DURING_REPLICATION =
new IndexFetchResult("Local index modification during replication", false, null);
public static final IndexFetchResult EXPECTING_NON_LEADER =
new IndexFetchResult("Replicating from leader but I'm the shard leader", false, null);
public static final IndexFetchResult LEADER_IS_NOT_ACTIVE =
@@ -402,7 +398,7 @@ IndexFetchResult fetchLatestIndex(boolean forceReplication)
}

/**
* This command downloads all the necessary files from leader to install a index commit point.
* This command downloads all the necessary files from leader to install an index commit point.
* Only changed files are downloaded. It also downloads the conf files (if they are modified).
*
* @param forceReplication force a replication in all cases
@@ -670,7 +666,7 @@ IndexFetchResult fetchLatestIndex(boolean forceReplication, boolean forceCoreRel
latestGeneration);
final long timeTakenSeconds = getReplicationTimeElapsed();
final Long bytesDownloadedPerSecond =
(timeTakenSeconds != 0 ? Long.valueOf(bytesDownloaded / timeTakenSeconds) : null);
(timeTakenSeconds != 0 ? bytesDownloaded / timeTakenSeconds : null);
log.info(
"Total time taken for download (fullCopy={},bytesDownloaded={}) : {} secs ({} bytes/sec) to {}",
isFullCopyNeeded,
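
Dropping the explicit Long.valueOf here is safe because of how Java types conditional expressions: when one branch is a primitive long and the other is null, the whole expression has type Long and the primitive result is autoboxed (JLS 15.25), so no NullPointerException can arise from the null branch. A minimal standalone sketch of that behavior:

public class TernaryBoxingDemo {
  public static void main(String[] args) {
    long bytesDownloaded = 1024;
    long timeTakenSeconds = 0;
    // One branch is a primitive long, the other is null: the expression type is
    // Long, so the division result is boxed automatically when the guard holds.
    Long bytesPerSecond =
        (timeTakenSeconds != 0 ? bytesDownloaded / timeTakenSeconds : null);
    System.out.println(bytesPerSecond); // prints "null"; no explicit Long.valueOf needed
  }
}
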
@@ -798,8 +794,7 @@ private void cleanup(
Directory indexDir,
boolean deleteTmpIdxDir,
File tmpTlogDir,
boolean successfulInstall)
throws IOException {
boolean successfulInstall) {
try {
if (!successfulInstall) {
try {
@@ -847,7 +842,9 @@ private void cleanup(
log.error("Error releasing indexDir", e);
}
try {
if (tmpTlogDir != null) delTree(tmpTlogDir);
if (tmpTlogDir != null) {
delTree(tmpTlogDir);
}
} catch (Exception e) {
log.error("Error deleting tmpTlogDir", e);
}
@@ -1368,7 +1365,7 @@ private static boolean slowFileExists(Directory dir, String fileName) throws IOE
* All the files which are common between leader and follower must have same size and same
* checksum else we assume they are not compatible (stale).
*
* @return true if the index stale and we need to download a fresh copy, false otherwise.
* @return true if the index is stale, and we need to download a fresh copy, false otherwise.
* @throws IOException if low level io error
*/
private boolean isIndexStale(Directory dir) throws IOException {
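
The rule this javadoc describes can be sketched independently of Lucene's Directory API. The sketch below is not the real isIndexStale implementation; the name-to-{size, checksum} maps are hypothetical stand-ins for the leader's file list and the local directory listing:

import java.util.Map;

final class StalenessSketch {
  /** Values are {size, checksum} pairs; both maps are hypothetical stand-ins. */
  static boolean isStale(Map<String, long[]> leaderFiles, Map<String, long[]> localFiles) {
    for (Map.Entry<String, long[]> e : leaderFiles.entrySet()) {
      long[] local = localFiles.get(e.getKey());
      if (local == null) {
        continue; // file only on the leader: it simply gets downloaded; not staleness
      }
      long[] leader = e.getValue();
      if (leader[0] != local[0] || leader[1] != local[1]) {
        return true; // a shared file differs in size or checksum: assume stale
      }
    }
    return false;
  }
}
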
@@ -1516,7 +1513,7 @@ private void copyTmpConfFiles2Conf(Path tmpconfDir) {
/**
* The tlog files are moved from the tmp dir to the tlog dir as an atomic filesystem operation. A
* backup of the old directory is maintained. If the directory move fails, it will try to revert
* back the original tlog directory.
* the original tlog directory.
*/
private boolean copyTmpTlogFiles2Tlog(File tmpTlogDir) {
Path tlogDir =
@@ -1540,11 +1537,11 @@ private boolean copyTmpTlogFiles2Tlog(File tmpTlogDir) {
} catch (IOException e) {
log.error("Unable to rename: {} to: {}", src, tlogDir, e);

// In case of error, try to revert back the original tlog directory
// In case of error, try to revert the original tlog directory
try {
Files.move(backupTlogDir, tlogDir, StandardCopyOption.ATOMIC_MOVE);
} catch (IOException e2) {
// bad, we were not able to revert back the original tlog directory
// bad, we were not able to revert the original tlog directory
throw new SolrException(
SolrException.ErrorCode.SERVER_ERROR,
"Unable to rename: " + backupTlogDir + " to: " + tlogDir);
@@ -1598,23 +1595,6 @@ private Collection<Map<String, Object>> getModifiedConfFiles(
return nameVsFile.isEmpty() ? Collections.emptyList() : nameVsFile.values();
}

/**
* This simulates File.delete exception-wise, since this class has some strange behavior with it.
* The only difference is it returns null on success, throws SecurityException on
* SecurityException, otherwise returns Throwable preventing deletion (instead of false), for
* additional information.
*/
static Throwable delete(File file) {
try {
Files.delete(file.toPath());
return null;
} catch (SecurityException e) {
throw e;
} catch (Throwable other) {
return other;
}
}

static boolean delTree(File dir) {
try {
org.apache.lucene.util.IOUtils.rm(dir.toPath());
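
The deleted delete(File) helper existed to paper over File.delete() returning a bare boolean; the surviving delTree gets its diagnostics from Lucene's IOUtils.rm, which throws an IOException naming whatever could not be removed. A sketch of that surviving idiom:

import java.io.IOException;
import java.nio.file.Path;
import org.apache.lucene.util.IOUtils;

final class DelTreeSketch {
  static boolean delTree(Path dir) {
    try {
      IOUtils.rm(dir); // recursive delete; tolerates a path that does not exist
      return true;
    } catch (IOException e) {
      System.err.println("Unable to delete directory: " + dir + " due to: " + e);
      return false;
    }
  }
}
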
@@ -1730,8 +1710,7 @@ private class FileFetcher {
Map<String, Object> fileDetails,
String saveAs,
String solrParamOutput,
long latestGen)
throws IOException {
long latestGen) {
this.file = file;
this.fileName = (String) fileDetails.get(NAME);
this.size = (Long) fileDetails.get(SIZE);
@@ -1781,7 +1760,7 @@ private void fetch() throws Exception {
}
} finally {
cleanup();
// if cleanup succeeds . The file is downloaded fully. do an fsync
// if cleanup succeeds, and the file is downloaded fully, then do a fsync.
fsyncService.execute(
() -> {
try {
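
The corrected comment describes an fsync handed off to a background executor so the fetch thread is not blocked on disk flushes. A hedged sketch of that pattern; fsyncService and the file path are stand-ins here, not the handler's actual fields:

import java.io.IOException;
import java.nio.channels.FileChannel;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;
import java.util.concurrent.ExecutorService;

final class FsyncSketch {
  static void fsyncLater(ExecutorService fsyncService, Path downloadedFile) {
    fsyncService.execute(
        () -> {
          try (FileChannel ch = FileChannel.open(downloadedFile, StandardOpenOption.WRITE)) {
            ch.force(true); // flush file contents and metadata to disk
          } catch (IOException e) {
            e.printStackTrace(); // a real implementation would log and track the failure
          }
        });
  }
}
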
@@ -1885,8 +1864,8 @@ private int fetchPackets(FastInputStream fis) throws Exception {
}

/**
* The webcontainer flushes the data only after it fills the buffer size. So, all data has to be
* read as readFully() other wise it fails. So read everything as bytes and then extract an
* The web container flushes the data only after it fills the buffer size. So, all data has to
* be read as readFully() otherwise it fails. So read everything as bytes and then extract an
* integer out of it
*/
private int readInt(byte[] b) {
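
Since the stream is consumed with readFully into byte arrays, integers have to be reassembled by hand. A sketch of the usual big-endian reconstruction, which is presumably what readInt does here:

final class ReadIntSketch {
  static int readInt(byte[] b) {
    // Big-endian: most significant byte first, masking to avoid sign extension.
    return ((b[0] & 0xff) << 24)
        | ((b[1] & 0xff) << 16)
        | ((b[2] & 0xff) << 8)
        | (b[3] & 0xff);
  }
}
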
@@ -1953,7 +1932,7 @@ private FastInputStream getStream() throws IOException {
}
// wt=filestream this is a custom protocol
params.set(CommonParams.WT, FILE_STREAM);
// This happen if there is a failure there is a retry. the offset=<sizedownloaded> ensures
// This happens if there is a failure there is a retry. the offset=<sizedownloaded> ensures
// that the server starts from the offset
if (bytesDownloaded > 0) {
params.set(OFFSET, Long.toString(bytesDownloaded));
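
The corrected comment documents the resume protocol: on a retry after a failure, the already-downloaded byte count is sent as an offset so the server continues mid-file. A hedged sketch of building those request params; the literal param strings mirror the constants in the diff but are assumptions here:

import org.apache.solr.common.params.ModifiableSolrParams;

final class ResumeParamsSketch {
  static ModifiableSolrParams fileStreamParams(String fileName, long bytesDownloaded) {
    ModifiableSolrParams params = new ModifiableSolrParams();
    params.set("wt", "filestream"); // the custom protocol noted in the code
    params.set("file", fileName);
    if (bytesDownloaded > 0) {
      params.set("offset", Long.toString(bytesDownloaded)); // resume from this byte
    }
    return params;
  }
}
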
@@ -2046,16 +2025,14 @@ protected class DirectoryFileFetcher extends FileFetcher {
}

private static class LocalFsFile implements FileInterface {
private File copy2Dir;

FileChannel fileChannel;
private FileOutputStream fileOutputStream;
File file;

LocalFsFile(File dir, String saveAs) throws IOException {
this.copy2Dir = dir;

this.file = new File(copy2Dir, saveAs);
this.file = new File(dir, saveAs);

File parentDir = this.file.getParentFile();
if (!parentDir.exists()) {
35 changes: 10 additions & 25 deletions solr/core/src/java/org/apache/solr/handler/ReplicationHandler.java
@@ -42,7 +42,6 @@
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.Date;
@@ -153,8 +152,6 @@ public class ReplicationHandler extends RequestHandlerBase
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
SolrCore core;

private volatile boolean closed = false;

@Override
public Name getPermissionName(AuthorizationContext request) {
return Name.READ_PERM;
@@ -227,8 +224,6 @@ public String toString() {

private volatile long executorStartTime;

private int numTimesReplicated = 0;

private final Map<String, FileInfo> confFileInfoCache = new HashMap<>();

private Long reserveCommitDuration = readIntervalMs("00:00:10");
@@ -323,8 +318,7 @@ public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throw
* @see IndexFetcher.LocalFsFileFetcher
* @see IndexFetcher.DirectoryFileFetcher
*/
private void getFileStream(SolrParams solrParams, SolrQueryResponse rsp, SolrQueryRequest req)
throws IOException {
private void getFileStream(SolrParams solrParams, SolrQueryResponse rsp, SolrQueryRequest req) {
final CoreReplication coreReplicationAPI = new CoreReplication(core, req, rsp);
String fileName;
String dirType;
@@ -800,14 +794,6 @@ private Date getNextScheduledExecTime() {
return nextTime;
}

int getTimesReplicatedSinceStartup() {
return numTimesReplicated;
}

void setTimesReplicatedSinceStartup() {
numTimesReplicated++;
}

@Override
public Category getCategory() {
return Category.REPLICATION;
@@ -1043,7 +1029,7 @@ private NamedList<Object> getReplicationDetails(
follower.add("replicationStartTime", replicationStartTimeStamp.toString());
}
long elapsed = fetcher.getReplicationTimeElapsed();
follower.add("timeElapsed", String.valueOf(elapsed) + "s");
follower.add("timeElapsed", elapsed + "s");

if (bytesDownloaded > 0)
estimatedTimeRemaining =
@@ -1108,13 +1094,13 @@ private Object formatVal(String key, Properties props, Class<?> clzz) {
if (s == null || s.trim().length() == 0) return null;
if (clzz == Date.class) {
try {
Long l = Long.parseLong(s);
long l = Long.parseLong(s);
return new Date(l).toString();
} catch (NumberFormatException e) {
return null;
}
} else if (clzz == List.class) {
String ss[] = s.split(",");
String[] ss = s.split(",");
List<String> l = new ArrayList<>();
for (String s1 : ss) {
l.add(new Date(Long.parseLong(s1)).toString());
@@ -1272,11 +1258,11 @@ public void inform(SolrCore core) {
if (enableLeader) {
includeConfFiles = (String) leader.get(CONF_FILES);
if (includeConfFiles != null && includeConfFiles.trim().length() > 0) {
List<String> files = Arrays.asList(includeConfFiles.split(","));
String[] files = includeConfFiles.split(",");
for (String file : files) {
if (file.trim().length() == 0) continue;
String[] strs = file.trim().split(":");
// if there is an alias add it or it is null
// if there is an alias add it, or it is null
confFileNameAlias.add(strs[0], strs.length > 1 ? strs[1] : null);
}
log.info("Replication enabled for following config files: {}", includeConfFiles);
@@ -1347,7 +1333,7 @@ public void inform(SolrCore core) {
}
}

// ensure the writer is init'd so that we have a list of commit points
// ensure the writer is initialized so that we have a list of commit points
RefCounted<IndexWriter> iw =
core.getUpdateHandler().getSolrCoreState().getIndexWriter(core);
iw.decref();
@@ -1532,7 +1518,7 @@ private Long readIntervalNs(String interval) {
public static final String FETCH_FROM_LEADER = "fetchFromLeader";

// in case of TLOG replica, if leaderVersion = zero, don't do commit
// otherwise updates from current tlog won't copied over properly to the new tlog, leading to data
// otherwise updates from current tlog won't be copied over properly to the new tlog, leading to
// data
// loss
// don't commit on leader version zero for PULL replicas as PULL should only get its index
// state from leader
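
The guard this comment describes reduces to one predicate. A hedged sketch, not the exact Solr logic:

final class LeaderVersionZeroGuard {
  // For TLOG replicas, skip the post-fetch commit when the leader reports version
  // zero, so updates still sitting in the current tlog are not dropped.
  static boolean shouldCommitAfterFetch(long leaderVersion, boolean skipCommitOnLeaderVersionZero) {
    return !(leaderVersion == 0L && skipCommitOnLeaderVersionZero);
  }
}
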
@@ -1576,8 +1563,6 @@ private Long readIntervalNs(String interval) {

public static final String ALIAS = "alias";

public static final String CONF_CHECKSUM = "confchecksum";

public static final String CONF_FILES = "confFiles";

public static final String REPLICATE_AFTER = "replicateAfter";
@@ -1601,7 +1586,7 @@ private Long readIntervalNs(String interval) {
/**
* Boolean param for tests that can be specified when using {@link #CMD_FETCH_INDEX} to force the
* current request to block until the fetch is complete. <b>NOTE:</b> This param is not advised
* for non-test code, since the duration of the fetch for non-trivial indexes will likeley cause
* for non-test code, since the duration of the fetch for non-trivial indexes will likely cause
* the request to time out.
*
* @lucene.internal
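
A hedged sketch of how a test might use this blocking mode via SolrJ's GenericSolrRequest (which the test diff below also uses); the literal "wait" param name is an assumption here, not confirmed by the diff:

import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.SolrRequest;
import org.apache.solr.client.solrj.request.GenericSolrRequest;
import org.apache.solr.common.params.ModifiableSolrParams;

final class BlockingFetchSketch {
  static void forceFetch(SolrClient followerClient) throws Exception {
    ModifiableSolrParams params = new ModifiableSolrParams();
    params.set("command", "fetchindex");
    params.set("wait", "true"); // test-only: block until the fetch completes
    new GenericSolrRequest(SolrRequest.METHOD.GET, "/replication", params)
        .process(followerClient);
  }
}
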
solr/core/src/test/org/apache/solr/handler/TestReplicationHandler.java
@@ -304,8 +304,8 @@ public void doTestDetails() throws Exception {
// check details on the follower a couple of times before & after fetching
for (int i = 0; i < 3; i++) {
NamedList<Object> details = getDetails(followerClient);
assertNotNull(i + ": " + details);
assertNotNull(i + ": " + details.toString(), details.get("follower"));
assertNotNull(i + ": " + details, details);
assertNotNull(i + ": " + details, details.get("follower"));

if (i > 0) {
rQuery(i, "*:*", followerClient);
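
The change above fixes a classic JUnit pitfall: the one-argument assertNotNull treats its argument as the value under test, so passing a concatenated message string creates an assertion that can never fail. An illustration of the broken and fixed forms:

import static org.junit.Assert.assertNotNull;

final class AssertNotNullPitfall {
  static void check(int i, Object details) {
    // Broken: the concatenated string is never null, even when details is null.
    // assertNotNull(i + ": " + details);

    // Fixed: the two-arg overload is (message, object); details is actually checked.
    assertNotNull(i + ": " + details, details);
  }
}
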
@@ -459,7 +459,7 @@ public void doTestReplicateAfterWrite2Follower() throws Exception {
index(followerClient, "id", 555, "name", "name = " + 555);
followerClient.commit(true, true);

// this doc is added to follower so it should show an item w/ that result
// this doc is added to follower, so it should show an item w/ that result
assertEquals(1, numFound(rQuery(1, "id:555", followerClient)));

// Let's fetch the index rather than rely on the polling.
@@ -528,7 +528,7 @@ public void doTestIndexAndConfigReplication() throws Exception {

followerJetty.stop();

// setup an sub directory /foo/ in order to force subdir file replication
// set up a subdirectory /foo/ in order to force subdir file replication
File leaderFooDir = new File(leader.getConfDir() + File.separator + "foo");
File leaderBarFile = new File(leaderFooDir, "bar.txt");
assertTrue("could not make dir " + leaderFooDir, leaderFooDir.mkdirs());
@@ -551,7 +551,7 @@ public void doTestIndexAndConfigReplication() throws Exception {
followerQueryRsp = rQuery(1, "*:*", followerClient);
assertVersions(leaderClient, followerClient);
SolrDocument d = ((SolrDocumentList) followerQueryRsp.get("response")).get(0);
assertEquals("newname = 2000", (String) d.getFieldValue("newname"));
assertEquals("newname = 2000", d.getFieldValue("newname"));

assertTrue(followerFooDir.isDirectory());
assertTrue(followerBarFile.exists());
@@ -596,8 +596,8 @@ public void doTestStopPoll() throws Exception {
// get docs from leader and check if number is equal to leader
assertEquals(nDocs + 1, numFound(rQuery(nDocs + 1, "*:*", leaderClient)));

// NOTE: this test is wierd, we want to verify it DOESNT replicate...
// for now, add a sleep for this.., but the logic is wierd.
// NOTE: this test is weird, we want to verify it DOESN'T replicate...
// for now, add a sleep for this... but the logic is weird.
Thread.sleep(3000);

// get docs from follower and check if number is not equal to leader; polling is disabled
@@ -1583,7 +1583,7 @@ public void testEmptyBackups() throws Exception {

index(leaderClient, "id", "1", "name", "foo");

{ // second backup w/uncommited doc
{ // second backup w/uncommitted doc
final String backupName = "empty_backup2";
final GenericSolrRequest req =
new GenericSolrRequest(
@@ -1695,7 +1695,7 @@ private Date watchCoreStartAt(JettySolrRunner jettySolrRunner, final Date min)
return startTime;
}
} catch (SolrException e) {
// workarround for SOLR-4668
// workaround for SOLR-4668
if (500 != e.code()) {
throw e;
} // else server possibly from the core reload in progress...
@@ -1705,7 +1705,7 @@ private Date watchCoreStartAt(JettySolrRunner jettySolrRunner, final Date min)
Thread.sleep(sleepInterval);
}
fail("timed out waiting for collection1 startAt time to exceed: " + min);
return min; // compilation neccessity
return min; // compilation necessity
}
}

(diffs for the remaining changed files were not loaded)
