Do not return Error in case of duplicate layer detection #4094

Closed · wants to merge 4 commits
27 changes: 17 additions & 10 deletions pageserver/src/tenant/layer_map.rs
@@ -55,6 +55,7 @@ use anyhow::{bail, Result};
 use std::collections::VecDeque;
 use std::ops::Range;
 use std::sync::Arc;
+use tracing::*;
 use utils::lsn::Lsn;
 
 use historic_layer_coverage::BufferedHistoricLayerCoverage;
@@ -275,18 +276,24 @@ where
     ///
     pub(self) fn insert_historic_noflush(&mut self, layer: Arc<L>) -> anyhow::Result<()> {
         let key = historic_layer_coverage::LayerKey::from(&*layer);
-        if self.historic.contains(&key) {
-            bail!(
-                "Attempt to insert duplicate layer {} in layer map",
-                layer.short_id()
-            );
-        }
-        self.historic.insert(key, Arc::clone(&layer));
+        match self.historic.replace(&key, Arc::clone(&layer), |existing| {
+            !Self::compare_arced_layers(existing, &layer)
+        }) {
+            Replacement::Replaced { .. } => {
+                if Self::is_l0(&layer) {
+                    bail!("Duplicate L0 layer {}", layer.short_id());
+                }
+                warn!("Replace duplicate layer {} in layer map", layer.short_id());
+            }
+            Replacement::Unexpected(_) => bail!("Replace layer with itself is prohibited"),
+            Replacement::NotFound | Replacement::RemovalBuffered => {
+                self.historic.insert(key, Arc::clone(&layer));
 
-        if Self::is_l0(&layer) {
-            self.l0_delta_layers.push(layer);
+                if Self::is_l0(&layer) {
+                    self.l0_delta_layers.push(layer);
+                }
+            }
         }
 
         Ok(())
     }

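The `Replacement` value matched above is returned by `replace` in historic_layer_coverage.rs, whose definition is not part of this diff. A minimal sketch of the contract the match arms imply (variant names follow the code above; the field on `Replaced` is an assumption):

// Sketch only: the real definition lives in historic_layer_coverage.rs and
// is not shown in this diff.
pub enum Replacement<Value> {
    // An equivalent layer was already present and has been replaced.
    Replaced { in_buffered: bool },
    // No layer is known under this key.
    NotFound,
    // A removal of this key is buffered but not yet flushed; the caller
    // may simply insert.
    RemovalBuffered,
    // A layer was present but failed the caller's expectation check. In
    // insert_historic_noflush the check rejects an existing entry that is
    // the very same Arc, hence "Replace layer with itself is prohibited".
    Unexpected(Value),
}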
8 changes: 0 additions & 8 deletions pageserver/src/tenant/layer_map/historic_layer_coverage.rs
@@ -417,14 +417,6 @@ impl<Value: Clone> BufferedHistoricLayerCoverage<Value> {
         }
     }
 
-    pub fn contains(&self, layer_key: &LayerKey) -> bool {
-        match self.buffer.get(layer_key) {
-            Some(None) => false, // layer remove was buffered
-            Some(_) => true,     // layer insert was buffered
-            None => self.layers.contains_key(layer_key), // no buffered ops for this layer
-        }
-    }
-
     pub fn insert(&mut self, layer_key: LayerKey, value: Value) {
         self.buffer.insert(layer_key, Some(value));
     }
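The removed `contains` is superseded by `replace`, which has to honor the same three buffer states: `Some(None)` means a removal was buffered, `Some(_)` means an insert was buffered, and `None` falls through to the materialized `layers`. A hedged sketch of that logic, reusing the assumed `Replacement` enum above; the PR's actual implementation is not shown in this diff:

// Hypothetical sketch, not the code merged in this PR.
// Assumes LayerKey: Clone and the Value: Clone bound on the impl block.
pub fn replace<F>(&mut self, key: &LayerKey, value: Value, is_expected: F) -> Replacement<Value>
where
    F: FnOnce(&Value) -> bool,
{
    // Resolve the currently visible value for this key, buffered ops first.
    let existing: Value = match self.buffer.get(key) {
        Some(None) => return Replacement::RemovalBuffered, // removal buffered
        Some(Some(buffered)) => buffered.clone(),          // insert buffered
        None => match self.layers.get(key) {
            Some(materialized) => materialized.clone(),
            None => return Replacement::NotFound, // nothing under this key
        },
    };

    // Let the caller veto the replacement (e.g. the same-Arc check above).
    if !is_expected(&existing) {
        return Replacement::Unexpected(existing);
    }

    // Buffer the replacement; it becomes visible on the next flush.
    self.buffer.insert(key.clone(), Some(value));
    Replacement::Replaced { in_buffered: true }
}

Buffering the replacement instead of applying it directly would keep `replace` consistent with the other mutators shown here (`insert`, and the buffered removals the old `contains` accounted for), which appear to defer their effects to the next flush.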
4 changes: 4 additions & 0 deletions pageserver/src/tenant/timeline.rs
@@ -3300,6 +3300,10 @@ impl Timeline {
 
         drop(all_keys_iter); // So that deltas_to_compact is no longer borrowed
 
+        fail_point!("compact-level0-phase1-finish", |_| {
+            Err(anyhow::anyhow!("failpoint compact-level0-phase1-finish").into())
+        });
+
         Ok(CompactLevel0Phase1Result {
             new_layers,
             deltas_to_compact,
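`fail_point!` here is the fail crate's macro: when the named point is armed, the closure runs and its result is returned early out of the compaction phase (the `.into()` converts the `anyhow::Error` into the function's error type). A small self-contained sketch of the mechanism; the "exit" action used by the regression test below is neon-side plumbing not shown in this diff, approximated here (assumption) with a callback:

// Cargo deps assumed: fail = { version = "0.5", features = ["failpoints"] }, anyhow.
use fail::fail_point;

fn compact_phase1() -> anyhow::Result<()> {
    // When the failpoint is armed, return early instead of finishing the phase.
    fail_point!("compact-level0-phase1-finish", |_| {
        Err(anyhow::anyhow!("failpoint compact-level0-phase1-finish"))
    });
    Ok(())
}

fn main() {
    // Standard fail-crate actions include "return", "panic", "sleep", "off".
    // The "exit" action accepted by neon's failpoint HTTP API is assumed to
    // be implemented roughly like this callback, which kills the process the
    // moment the failpoint is evaluated.
    fail::cfg_callback("compact-level0-phase1-finish", || std::process::exit(1)).unwrap();

    let _ = compact_phase1(); // with the callback armed, the process exits here
}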
42 changes: 42 additions & 0 deletions test_runner/regress/test_duplicate_layers.py
@@ -0,0 +1,42 @@
+import time
+
+import pytest
+from fixtures.neon_fixtures import NeonEnvBuilder, PgBin
+
+
+# Test duplicate layer detection
+#
+# This test sets a failpoint at the end of the first compaction phase:
+# after flushing the new L1 layers but before deleting the L0 layers.
+# On restart, compaction then regenerates the same L1 layer, producing a duplicate.
+@pytest.mark.timeout(600)
+def test_duplicate_layers(neon_env_builder: NeonEnvBuilder, pg_bin: PgBin):
+    env = neon_env_builder.init_start()
+
+    # These warnings are expected when the pageserver is restarted abruptly
+    env.pageserver.allowed_errors.append(".*found future image layer.*")
+    env.pageserver.allowed_errors.append(".*found future delta layer.*")
+    env.pageserver.allowed_errors.append(".*duplicate layer.*")
+
+    pageserver_http = env.pageserver.http_client()
+
+    # Use aggressive compaction and checkpoint settings
+    tenant_id, _ = env.neon_cli.create_tenant(
+        conf={
+            "checkpoint_distance": f"{1024 ** 2}",
+            "compaction_target_size": f"{1024 ** 2}",
+            "compaction_period": "1 s",
+            "compaction_threshold": "3",
+        }
+    )
+    endpoint = env.endpoints.create_start("main", tenant_id=tenant_id)
+    connstr = endpoint.connstr(options="-csynchronous_commit=off")
+    pg_bin.run_capture(["pgbench", "-i", "-s10", connstr])
+
+    pageserver_http.configure_failpoints(("compact-level0-phase1-finish", "exit"))
+
+    with pytest.raises(Exception):
+        pg_bin.run_capture(["pgbench", "-P1", "-N", "-c5", "-T500", "-Mprepared", connstr])
+    env.pageserver.stop()
+    env.pageserver.start()
+    time.sleep(10)  # let compaction run