Merge branch 'master' into DORIS-12389

kaijchen authored Sep 5, 2024
2 parents a73d971 + ae78c52 commit 6b36583
Showing 40 changed files with 988 additions and 273 deletions.
3 changes: 0 additions & 3 deletions .asf.yaml
@@ -49,7 +49,6 @@ github:
strict: false
contexts:
- License Check
- Clang Formatter
- CheckStyle
- P0 Regression (Doris Regression)
- External Regression (Doris External Regression)
@@ -87,7 +86,6 @@ github:
strict: false
contexts:
- License Check
- Clang Formatter
- CheckStyle
- Build Broker
- ShellCheck
@@ -109,7 +107,6 @@ github:
strict: false
contexts:
- License Check
- Clang Formatter
- CheckStyle
- P0 Regression (Doris Regression)
- External Regression (Doris External Regression)
5 changes: 5 additions & 0 deletions .github/workflows/clang-format.yml
@@ -67,6 +67,11 @@ jobs:
git checkout 6adbe14579e5b8e19eb3e31e5ff2479f3bd302c7
popd &>/dev/null
- name: Install Python dependencies
uses: actions/setup-python@v5
with:
python-version: '3.10' # Adjust if needed

- name: "Format it!"
if: ${{ steps.filter.outputs.changes == 'true' }}
uses: ./.github/actions/clang-format-lint-action
10 changes: 4 additions & 6 deletions be/src/exec/decompressor.cpp
@@ -468,7 +468,7 @@ Status Lz4BlockDecompressor::decompress(uint8_t* input, size_t input_len, size_t
}

std::size_t decompressed_large_block_len = 0;
do {
while (remaining_decompressed_large_block_len > 0) {
// Check that input length should not be negative.
if (input_len < sizeof(uint32_t)) {
*more_input_bytes = sizeof(uint32_t) - input_len;
@@ -505,8 +505,7 @@ Status Lz4BlockDecompressor::decompress(uint8_t* input, size_t input_len, size_t
output_ptr += decompressed_small_block_len;
remaining_decompressed_large_block_len -= decompressed_small_block_len;
decompressed_large_block_len += decompressed_small_block_len;

} while (remaining_decompressed_large_block_len > 0);
};

if (*more_input_bytes != 0) {
// Need more input buffer
@@ -586,7 +585,7 @@ Status SnappyBlockDecompressor::decompress(uint8_t* input, size_t input_len,
}

std::size_t decompressed_large_block_len = 0;
do {
while (remaining_decompressed_large_block_len > 0) {
// Check that input length should not be negative.
if (input_len < sizeof(uint32_t)) {
*more_input_bytes = sizeof(uint32_t) - input_len;
@@ -630,8 +629,7 @@ Status SnappyBlockDecompressor::decompress(uint8_t* input, size_t input_len,
output_ptr += decompressed_small_block_len;
remaining_decompressed_large_block_len -= decompressed_small_block_len;
decompressed_large_block_len += decompressed_small_block_len;

} while (remaining_decompressed_large_block_len > 0);
};

if (*more_input_bytes != 0) {
// Need more input buffer
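For context on the loop change in both decompressors above: switching from a do-while to a while means a large block whose remaining decompressed length is already zero never enters the loop body. Below is a loose, self-contained sketch of that loop shape; the names are hypothetical stand-ins, and the payload is copied verbatim where the real code would call LZ4/Snappy on each small block.

```cpp
#include <cstdint>
#include <cstring>
#include <iostream>
#include <vector>

// Loose sketch only: a "large block" is a sequence of small blocks, each
// prefixed by a 4-byte length. The payload copy below stands in for the real
// per-block LZ4/Snappy decompression; the point is the loop shape, which now
// skips the body entirely when remaining_len starts at zero.
static size_t decompress_large_block(const uint8_t* in, size_t in_len,
                                     size_t remaining_len, std::vector<uint8_t>* out) {
    size_t produced = 0;
    while (remaining_len > 0) {
        if (in_len < sizeof(uint32_t)) break;  // incomplete length prefix: caller needs more input
        uint32_t small_len = 0;
        std::memcpy(&small_len, in, sizeof(small_len));
        in += sizeof(small_len);
        in_len -= sizeof(small_len);
        if (in_len < small_len) break;                // incomplete payload: caller needs more input
        out->insert(out->end(), in, in + small_len);  // stand-in for the decompress call
        in += small_len;
        in_len -= small_len;
        remaining_len -= small_len;
        produced += small_len;
    }
    return produced;
}

int main() {
    // Build two small blocks, "abc" and "de", each with a native-endian length prefix.
    std::vector<uint8_t> input;
    for (const char* s : {"abc", "de"}) {
        uint32_t n = static_cast<uint32_t>(std::strlen(s));
        const uint8_t* p = reinterpret_cast<const uint8_t*>(&n);
        input.insert(input.end(), p, p + sizeof(n));
        input.insert(input.end(), s, s + n);
    }
    std::vector<uint8_t> out;
    std::cout << decompress_large_block(input.data(), input.size(), 5, &out) << " bytes\n";  // 5 bytes
}
```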
68 changes: 1 addition & 67 deletions be/src/olap/cumulative_compaction_time_series_policy.cpp
@@ -27,14 +27,11 @@ namespace doris {

uint32_t TimeSeriesCumulativeCompactionPolicy::calc_cumulative_compaction_score(Tablet* tablet) {
uint32_t score = 0;
uint32_t level0_score = 0;
bool base_rowset_exist = false;
const int64_t point = tablet->cumulative_layer_point();

int64_t level0_total_size = 0;
RowsetMetaSharedPtr first_meta;
int64_t first_version = INT64_MAX;
std::list<RowsetMetaSharedPtr> checked_rs_metas;
// NOTE: tablet._meta_lock is hold
auto& rs_metas = tablet->tablet_meta()->all_rs_metas();
// check the base rowset and collect the rowsets of cumulative part
@@ -53,12 +50,6 @@ uint32_t TimeSeriesCumulativeCompactionPolicy::calc_cumulative_compaction_score(
} else {
// collect the rowsets of cumulative part
score += rs_meta->get_compaction_score();
if (rs_meta->compaction_level() == 0) {
level0_total_size += rs_meta->total_disk_size();
level0_score += rs_meta->get_compaction_score();
} else {
checked_rs_metas.push_back(rs_meta);
}
}
}

@@ -73,64 +64,7 @@ uint32_t TimeSeriesCumulativeCompactionPolicy::calc_cumulative_compaction_score(
return 0;
}

// Condition 1: the size of input files for compaction meets the requirement of parameter compaction_goal_size
int64_t compaction_goal_size_mbytes =
tablet->tablet_meta()->time_series_compaction_goal_size_mbytes();
if (level0_total_size >= compaction_goal_size_mbytes * 1024 * 1024) {
return score;
}

// Condition 2: the number of input files reaches the threshold specified by parameter compaction_file_count_threshold
if (level0_score >= tablet->tablet_meta()->time_series_compaction_file_count_threshold()) {
return score;
}

// Condition 3: level1 achieve compaction_goal_size
if (tablet->tablet_meta()->time_series_compaction_level_threshold() >= 2) {
checked_rs_metas.sort([](const RowsetMetaSharedPtr& a, const RowsetMetaSharedPtr& b) {
return a->version().first < b->version().first;
});
int32_t rs_meta_count = 0;
int64_t continuous_size = 0;
for (const auto& rs_meta : checked_rs_metas) {
rs_meta_count++;
continuous_size += rs_meta->total_disk_size();
if (rs_meta_count >= 2) {
if (continuous_size >= compaction_goal_size_mbytes * 1024 * 1024) {
return score;
}
}
}
}

int64_t now = UnixMillis();
int64_t last_cumu = tablet->last_cumu_compaction_success_time();
if (last_cumu != 0) {
int64_t cumu_interval = now - last_cumu;

// Condition 4: the time interval between compactions exceeds the value specified by parameter _compaction_time_threshold_second
if (cumu_interval >
(tablet->tablet_meta()->time_series_compaction_time_threshold_seconds() * 1000)) {
return score;
}
} else if (score > 0) {
// If the compaction process has not been successfully executed,
// the condition for triggering compaction based on the last successful compaction time (condition 3) will never be met
tablet->set_last_cumu_compaction_success_time(now);
}

// Condition 5: If there is a continuous set of empty rowsets, prioritize merging.
std::vector<RowsetSharedPtr> input_rowsets;
std::vector<RowsetSharedPtr> candidate_rowsets =
tablet->pick_candidate_rowsets_to_cumulative_compaction();
tablet->calc_consecutive_empty_rowsets(
&input_rowsets, candidate_rowsets,
tablet->tablet_meta()->time_series_compaction_empty_rowsets_threshold());
if (!input_rowsets.empty()) {
return score;
}

return 0;
return score;
}

void TimeSeriesCumulativeCompactionPolicy::calculate_cumulative_point(
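For context: the removed block gated the returned score behind goal-size, file-count, level-1 size, time-interval, and empty-rowset conditions; after this change the accumulated score of the cumulative part is returned directly once a base rowset exists (hence the test expectation change from 0 to 9 further down). A loose sketch of the simplified flow follows, using hypothetical stand-in types rather than the Doris RowsetMeta API.

```cpp
#include <cstdint>
#include <iostream>
#include <vector>

// Hypothetical stand-in type; not the Doris RowsetMeta API.
struct RowsetMetaStub {
    int64_t start_version;
    uint32_t compaction_score;
};

// Loose sketch of the simplified scoring: once a base rowset (version starting
// at 0) exists, the per-rowset scores of the cumulative part are summed and
// returned as-is, without the extra gating removed in the diff above.
uint32_t calc_cumulative_compaction_score(const std::vector<RowsetMetaStub>& rs_metas) {
    uint32_t score = 0;
    bool base_rowset_exists = false;
    for (const auto& rs : rs_metas) {
        if (rs.start_version == 0) {
            base_rowset_exists = true;     // the base part
        } else {
            score += rs.compaction_score;  // the cumulative part
        }
    }
    return base_rowset_exists ? score : 0;  // no base rowset yet: wait for base compaction
}

int main() {
    std::vector<RowsetMetaStub> rs = {{0, 1}, {2, 4}, {5, 5}};
    std::cout << calc_cumulative_compaction_score(rs) << '\n';  // prints 9
}
```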
3 changes: 2 additions & 1 deletion be/src/olap/rowset/segment_v2/segment_writer.cpp
@@ -596,7 +596,8 @@ Status SegmentWriter::append_block_with_partial_content(const vectorized::Block*
segment_pos);

} else {
if (!_opts.rowset_ctx->partial_update_info->can_insert_new_rows_in_partial_update) {
if (!_opts.rowset_ctx->partial_update_info->can_insert_new_rows_in_partial_update &&
!have_delete_sign) {
std::string error_column;
for (auto cid : _opts.rowset_ctx->partial_update_info->missing_cids) {
const TabletColumn& col = _tablet_schema->column(cid);
3 changes: 2 additions & 1 deletion be/src/olap/rowset/segment_v2/vertical_segment_writer.cpp
@@ -455,7 +455,8 @@ Status VerticalSegmentWriter::_append_block_with_partial_content(RowsInBlock& da
DeleteBitmap::TEMP_VERSION_COMMON},
segment_pos);
} else {
if (!_opts.rowset_ctx->partial_update_info->can_insert_new_rows_in_partial_update) {
if (!_opts.rowset_ctx->partial_update_info->can_insert_new_rows_in_partial_update &&
!have_delete_sign) {
std::string error_column;
for (auto cid : _opts.rowset_ctx->partial_update_info->missing_cids) {
const TabletColumn& col = _tablet_schema->column(cid);
4 changes: 4 additions & 0 deletions be/src/pipeline/exec/result_sink_operator.h
@@ -58,6 +58,7 @@ struct ResultFileOptions {
std::string file_suffix;
//Bring BOM when exporting to CSV format
bool with_bom = false;
int64_t orc_writer_version = 0;

ResultFileOptions(const TResultFileSinkOptions& t_opt) {
file_path = t_opt.file_path;
@@ -108,6 +109,9 @@ struct ResultFileOptions {
if (t_opt.__isset.orc_compression_type) {
orc_compression_type = t_opt.orc_compression_type;
}
if (t_opt.__isset.orc_writer_version) {
orc_writer_version = t_opt.orc_writer_version;
}
}
};

18 changes: 18 additions & 0 deletions be/src/service/point_query_executor.cpp
@@ -49,7 +49,9 @@
#include "runtime/thread_context.h"
#include "util/key_util.h"
#include "util/runtime_profile.h"
#include "util/simd/bits.h"
#include "util/thrift_util.h"
#include "vec/columns/columns_number.h"
#include "vec/data_types/serde/data_type_serde.h"
#include "vec/exprs/vexpr.h"
#include "vec/exprs/vexpr_context.h"
@@ -143,6 +145,9 @@ Status Reusable::init(const TDescriptorTable& t_desc_tbl, const std::vector<TExp
extract_slot_ref(expr->root(), tuple_desc(), output_slot_descs);
}

// get the delete sign idx in block
_delete_sign_idx = _col_uid_to_idx[schema.columns()[schema.delete_sign_idx()]->unique_id()];

if (schema.have_column(BeConsts::ROW_STORE_COL)) {
const auto& column = *DORIS_TRY(schema.column(BeConsts::ROW_STORE_COL));
_row_store_column_ids = column.unique_id();
@@ -483,6 +488,19 @@ Status PointQueryExecutor::_lookup_row_data() {
}
}
}
// filter rows by delete sign
if (_row_hits > 0 && _reusable->delete_sign_idx() != -1) {
vectorized::ColumnPtr delete_filter_columns =
_result_block->get_columns()[_reusable->delete_sign_idx()];
const auto& filter =
assert_cast<const vectorized::ColumnInt8*>(delete_filter_columns.get())->get_data();
size_t count = filter.size() - simd::count_zero_num((int8_t*)filter.data(), filter.size());
if (count == filter.size()) {
_result_block->clear();
} else if (count > 0) {
return Status::NotSupported("Not implemented since only single row at present");
}
}
return Status::OK();
}

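For context on the block added to _lookup_row_data above: it counts rows whose delete-sign column is non-zero, clears the result block when every fetched row is marked deleted, and rejects a mixed result since point query currently returns a single row. A minimal sketch of that decision follows, with std::count standing in for simd::count_zero_num.

```cpp
#include <algorithm>
#include <cstdint>
#include <iostream>
#include <vector>

enum class FilterResult { kKeep, kClearBlock, kNotSupported };

// Minimal sketch: decide what to do with the fetched rows based on their
// delete-sign values. The real code derives the count with simd::count_zero_num
// over ColumnInt8 data; std::count is a plain stand-in with the same meaning.
FilterResult apply_delete_sign(const std::vector<int8_t>& delete_sign) {
    if (delete_sign.empty()) return FilterResult::kKeep;  // nothing fetched, nothing to filter
    size_t zeros = static_cast<size_t>(
            std::count(delete_sign.begin(), delete_sign.end(), static_cast<int8_t>(0)));
    size_t deleted = delete_sign.size() - zeros;
    if (deleted == delete_sign.size()) return FilterResult::kClearBlock;  // every row is deleted
    if (deleted > 0) return FilterResult::kNotSupported;  // mixed result: single-row only today
    return FilterResult::kKeep;
}

int main() {
    std::cout << (apply_delete_sign({1}) == FilterResult::kClearBlock) << '\n';  // 1
    std::cout << (apply_delete_sign({0}) == FilterResult::kKeep) << '\n';        // 1
}
```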
5 changes: 5 additions & 0 deletions be/src/service/point_query_executor.h
@@ -100,6 +100,9 @@ class Reusable {

RuntimeState* runtime_state() { return _runtime_state.get(); }

// delete sign idx in block
int32_t delete_sign_idx() const { return _delete_sign_idx; }

private:
// caching TupleDescriptor, output_expr, etc...
std::unique_ptr<RuntimeState> _runtime_state;
@@ -118,6 +121,8 @@ class Reusable {
std::unordered_set<int32_t> _missing_col_uids;
// included cids in rowstore(column group)
std::unordered_set<int32_t> _include_col_uids;
// delete sign idx in block
int32_t _delete_sign_idx = -1;
};

// RowCache is a LRU cache for row store
5 changes: 3 additions & 2 deletions be/src/vec/data_types/data_type_decimal.h
@@ -595,10 +595,11 @@ void convert_from_decimal(typename ToDataType::FieldType* dst,
dst[i] = static_cast<ToFieldType>(src[i].value) / multiplier.value;
}
}
FromDataType from_data_type(precision, scale);
if constexpr (narrow_integral) {
FromDataType from_data_type(precision, scale);
for (size_t i = 0; i < size; i++) {
if (dst[i] < min_result || dst[i] > max_result) {
if (std::isnan(dst[i]) || std::isinf(dst[i]) || dst[i] < min_result ||
dst[i] > max_result) {
THROW_DECIMAL_CONVERT_OVERFLOW_EXCEPTION(from_data_type.to_string(src[i]),
from_data_type.get_name(),
ToDataType {}.get_name());
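Why the extra std::isnan/std::isinf checks matter: every ordered comparison involving NaN is false, so the previous min/max range check silently accepted NaN, and an explicit test is likewise needed for ±Inf. A small illustration follows; it is not tied to the Doris decimal types.

```cpp
#include <cmath>
#include <iostream>
#include <limits>

// Illustration only: a pure min/max range check treats NaN as "in range"
// because both comparisons are false; the explicit isnan/isinf tests catch it.
bool out_of_range(double v, double min_result, double max_result) {
    return std::isnan(v) || std::isinf(v) || v < min_result || v > max_result;
}

int main() {
    double nan = std::numeric_limits<double>::quiet_NaN();
    std::cout << std::boolalpha;
    std::cout << (nan < -1.0 || nan > 1.0) << '\n';     // false: the old check misses NaN
    std::cout << out_of_range(nan, -1.0, 1.0) << '\n';  // true: the new check catches it
}
```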
5 changes: 5 additions & 0 deletions be/src/vec/sink/writer/vfile_result_writer.cpp
@@ -84,6 +84,11 @@ VFileResultWriter::VFileResultWriter(
Status VFileResultWriter::open(RuntimeState* state, RuntimeProfile* profile) {
_state = state;
_init_profile(profile);
// check orc writer version
if (_file_opts->file_format == TFileFormatType::FORMAT_ORC &&
_file_opts->orc_writer_version < 1) {
return Status::InternalError("orc writer version is less than 1.");
}
// Delete existing files
if (_file_opts->delete_existing_files) {
RETURN_IF_ERROR(_delete_dir());
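For context, this guard pairs with the orc_writer_version field added to ResultFileOptions above: the member defaults to 0 and is only overwritten when the Thrift field was set, so a version below 1 tells the BE that the FE never sent the field and the ORC sink refuses to open. A hypothetical sketch of that handshake, with stand-in types rather than the Doris structs:

```cpp
#include <cstdint>
#include <iostream>
#include <string>

// Hypothetical stand-in types tying the two changes together.
struct ThriftOptsStub {
    bool orc_writer_version_isset = false;
    int64_t orc_writer_version = 0;
};

struct FileOptsStub {
    int64_t orc_writer_version = 0;  // same default as the new struct member
    explicit FileOptsStub(const ThriftOptsStub& t) {
        if (t.orc_writer_version_isset) {
            orc_writer_version = t.orc_writer_version;
        }
    }
};

std::string open_orc_sink(const FileOptsStub& opts) {
    // Mirrors the new guard: a version below 1 means the FE never set the field.
    if (opts.orc_writer_version < 1) {
        return "error: orc writer version is less than 1";
    }
    return "ok";
}

int main() {
    std::cout << open_orc_sink(FileOptsStub{ThriftOptsStub{}}) << '\n';         // old FE: error
    std::cout << open_orc_sink(FileOptsStub{ThriftOptsStub{true, 1}}) << '\n';  // new FE: ok
}
```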
@@ -404,7 +404,7 @@ TEST_F(TestTimeSeriesCumulativeCompactionPolicy, calc_cumulative_compaction_scor
const uint32_t score = _tablet->calc_compaction_score(CompactionType::CUMULATIVE_COMPACTION,
cumulative_compaction_policy);

EXPECT_EQ(0, score);
EXPECT_EQ(9, score);
}

TEST_F(TestTimeSeriesCumulativeCompactionPolicy, calc_cumulative_compaction_score_big_rowset) {
2 changes: 1 addition & 1 deletion cloud/CMakeLists.txt
@@ -435,7 +435,7 @@ endif()

if (NOT EXISTS "${THIRDPARTY_SRC}/${FDB_LIB}")
file(MAKE_DIRECTORY ${THIRDPARTY_SRC})
execute_process(COMMAND "curl" "${FDB_LIB_URL}"
execute_process(COMMAND "curl" "--retry" "10" "--retry-delay" "2" "--retry-max-time" "30" "${FDB_LIB_URL}"
"-o" "${THIRDPARTY_SRC}/${FDB_LIB}" "-k"
RESULTS_VARIABLE DOWNLOAD_RET)
if (NOT ${DOWNLOAD_RET} STREQUAL "0")
27 changes: 19 additions & 8 deletions cloud/script/start.sh
@@ -122,7 +122,10 @@ fi

echo "LIBHDFS3_CONF=${LIBHDFS3_CONF}"

export JEMALLOC_CONF="percpu_arena:percpu,background_thread:true,metadata_thp:auto,muzzy_decay_ms:5000,dirty_decay_ms:5000,oversize_threshold:0,prof:false,lg_prof_interval:-1"
# To enable periodic jeprof heap dumps, change `prof:false` to `prof:true`.
# To control the dump interval, set `lg_prof_interval`; it is the log2 of the allocation activity (in bytes) between dumps, and the default 34 means 2^34 = 16GB.
# To control the dump location, set `prof_prefix` to a specific path prefix, e.g. /doris_cloud/log/ms_; by default dumps are written to the directory where the start command was invoked.
export JEMALLOC_CONF="percpu_arena:percpu,background_thread:true,metadata_thp:auto,muzzy_decay_ms:5000,dirty_decay_ms:5000,oversize_threshold:0,prof_prefix:ms_,prof:false,lg_prof_interval:34"

if [[ "${RUN_VERSION}" -eq 1 ]]; then
"${bin}" --version
@@ -131,14 +134,22 @@ fi

mkdir -p "${DORIS_HOME}/log"
echo "starts ${process} with args: $*"
out_file=${DORIS_HOME}/log/${process}.out
if [[ "${RUN_DAEMON}" -eq 1 ]]; then
date >>"${DORIS_HOME}/log/${process}.out"
nohup "${bin}" "$@" >>"${DORIS_HOME}/log/${process}.out" 2>&1 &
# wait for log flush
sleep 1.5
tail -n10 "${DORIS_HOME}/log/${process}.out" | grep 'working directory' -B1 -A10
echo "please check process log for more details"
echo ""
# append 10 blank lines to ensure the following tail -n10 works correctly
printf "\n\n\n\n\n\n\n\n\n\n" >>"${out_file}"
echo "$(date +'%F %T') try to start ${process}" >>"${out_file}"
nohup "${bin}" "$@" >>"${out_file}" 2>&1 &
echo "wait and check ${process} start successfully"
sleep 3
tail -n10 "${out_file}" | grep 'successfully started brpc'
ret=$?
if [[ ${ret} -ne 0 ]]; then
echo "${process} may not start successfully please check process log for more details"
exit 1
fi
echo "${process} start successfully"
exit 0
elif [[ "${RUN_CONSOLE}" -eq 1 ]]; then
export DORIS_LOG_TO_STDERR=1
date
1 change: 1 addition & 0 deletions cloud/src/common/config.h
@@ -77,6 +77,7 @@ CONF_mInt32(scan_instances_interval_seconds, "60"); // 1min
CONF_mInt32(check_object_interval_seconds, "43200"); // 12hours

CONF_mInt64(check_recycle_task_interval_seconds, "600"); // 10min
CONF_mInt64(recycler_sleep_before_scheduling_seconds, "60");
// log a warning if a recycle task takes longer than this duration
CONF_mInt64(recycle_task_threshold_seconds, "10800"); // 3h
