Skip to content

Commit

Permalink
Remove useless commented code
Browse files — browse the repository at this point in the history
  • Loading branch information
flowbehappy authored and JaySon-Huang committed Sep 18, 2019
1 parent 1477b08 commit fb87fd4
Show file tree
Hide file tree
Showing 2 changed files with 0 additions and 125 deletions.
38 changes: 0 additions & 38 deletions dbms/src/Storages/DeltaMerge/DiskValueSpace.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -136,8 +136,6 @@ void DiskValueSpace::swap(DiskValueSpace & other)
std::swap(page_id, other.page_id);
chunks.swap(other.chunks);

// pk_columns.swap(other.pk_columns);

cache.swap(other.cache);
std::swap(cache_chunks, other.cache_chunks);
}
Expand Down Expand Up @@ -217,7 +215,6 @@ void DiskValueSpace::setChunks(Chunks && new_chunks, WriteBatch & meta_wb, Write
data_wb.delPage(m.second.page_id);

chunks.swap(new_chunks);
// pk_columns = {};

cache.clear();
cache_chunks = 0;
Expand All @@ -234,9 +231,6 @@ void DiskValueSpace::appendChunkWithCache(const OpContext & context, Chunk && ch
context.meta_storage.write(meta_wb);
}

// const auto & handle = context.dm_context.table_handle_define;
// ensurePKColumns(handle, context.data_storage);

chunks.push_back(std::move(chunk));

auto write_rows = block.rows();
Expand All @@ -248,8 +242,6 @@ void DiskValueSpace::appendChunkWithCache(const OpContext & context, Chunk && ch
return;
}

// pk_columns.append(block, handle);

// If former cache is empty, and this chunk is big enough, then no need to cache.
if (cache_chunks == 0
&& (write_rows >= context.dm_context.delta_cache_limit_rows || write_bytes >= context.dm_context.delta_cache_limit_bytes))
Expand Down Expand Up @@ -403,8 +395,6 @@ DiskValueSpace::getMergeBlocks(const ColumnDefine & handle, PageStorage & data_s

auto [start_chunk_index, rows_offset_in_start_chunk] = findChunk(rows_offset, deletes_offset);

// ensurePKColumns(handle, data_storage);

size_t block_rows_start = rows_offset;
size_t block_rows_end = rows_offset;
for (size_t chunk_index = start_chunk_index; chunk_index < chunks.size(); ++chunk_index)
Expand Down Expand Up @@ -455,9 +445,6 @@ bool DiskValueSpace::doFlushCache(const OpContext & context)

HandleRange delete_range = chunks.back().isDeleteRange() ? chunks.back().getDeleteRange() : HandleRange::newNone();

// auto & handle = context.dm_context.table_handle_define;
// ensurePKColumns(handle, context.data_storage);

if (cache_chunks == 1)
{
// One chunk no need to compact.
Expand Down Expand Up @@ -541,14 +528,6 @@ bool DiskValueSpace::doFlushCache(const OpContext & context)
chunks.swap(new_chunks);
cache.clear();
cache_chunks = 0;
// {
// // Remove the last cache_rows of pk_columns and append the compacted data
// PKColumns new_pk_columns(pk_columns.handle_column->cloneResized(in_storage_rows),
// pk_columns.version_column->cloneResized(in_storage_rows));
// new_pk_columns.append(compacted, handle);
//
// pk_columns = new_pk_columns;
// }

// ============================================================

Expand Down Expand Up @@ -605,14 +584,6 @@ BlockInputStreamPtr DiskValueSpace::getInputStream(const ColumnDefines & read_co
return std::make_shared<DVSBlockInputStream>(*this, read_columns, data_storage);
}

//const PKColumns & DiskValueSpace::getPKColumns(const ColumnDefine & handle, PageStorage & data_storage)
//{
// if (!should_cache)
// throw Exception("You should not call this method if should_cache is false");
// ensurePKColumns(handle, data_storage);
// return pk_columns;
//}

size_t DiskValueSpace::num_rows()
{
size_t rows = 0;
Expand Down Expand Up @@ -650,15 +621,6 @@ size_t DiskValueSpace::num_chunks()
return chunks.size();
}

//void DiskValueSpace::ensurePKColumns(const ColumnDefine & handle, PageStorage & data_storage)
//{
// if (!pk_columns && num_rows())
// {
// Block block = read({handle, VERSION_COLUMN_DEFINE}, data_storage, 0, num_rows());
// pk_columns = {std::move(block), handle};
// }
//}

size_t DiskValueSpace::rowsFromBack(size_t chunk_num_from_back)
{
size_t rows = 0;
Expand Down
87 changes: 0 additions & 87 deletions dbms/src/Storages/DeltaMerge/DiskValueSpace.h
Original file line number Diff line number Diff line change
Expand Up @@ -28,87 +28,6 @@ struct BlockOrRange
};
using BlockOrRanges = std::vector<BlockOrRange>;

/// Holds the primary-key (handle) column and the version column of a value
/// space as a pair, so they can be grown incrementally as blocks are appended.
/// NOTE(review): ColumnPtr appears to be a shared/COW column pointer (ClickHouse
/// style) — confirm; if shared, the const_cast mutation below writes through
/// shared state and assumes this struct is the exclusive owner.
struct PKColumns
{
ColumnPtr handle_column;
ColumnPtr version_column;

// We don't like to do copy each time update those columns, so let's do a little hack here.
// The raw pointers alias the columns above (obtained via const_cast) so that
// append() can insert in place without cloning. They must be refreshed
// whenever handle_column/version_column are re-assigned.
IColumn * handle_column_raw = nullptr;
IColumn * version_column_raw = nullptr;

// Empty state: both ColumnPtrs null, raw pointers null; operator bool() is false.
PKColumns() {}

// Wrap existing columns; caches the mutable raw pointers for in-place appends.
PKColumns(const ColumnPtr & handle_column, const ColumnPtr & version_column)
: handle_column(handle_column),
version_column(version_column),
handle_column_raw(const_cast<IColumn *>(handle_column.get())),
version_column_raw(const_cast<IColumn *>(version_column.get()))
{
}

// Copy a sub-range [offset, offset + limit) of `from` into freshly created
// (empty) columns of the same types. Unlike the wrapping constructor, this
// one does own its columns exclusively.
PKColumns(const PKColumns & from, size_t offset, size_t limit)
: handle_column(from.handle_column->cloneEmpty()),
version_column(from.version_column->cloneEmpty()),
handle_column_raw(const_cast<IColumn *>(handle_column.get())),
version_column_raw(const_cast<IColumn *>(version_column.get()))
{
handle_column_raw->insertRangeFrom(*from.handle_column, offset, limit);
version_column_raw->insertRangeFrom(*from.version_column, offset, limit);
}

// Take the handle and version columns out of `block`. An empty (falsy)
// block yields freshly created empty columns instead, so the result is
// always usable for append().
PKColumns(Block && block, const ColumnDefine & handle)
{
if (block)
{
handle_column = block.getByName(handle.name).column;
version_column = block.getByName(VERSION_COLUMN_NAME).column;
}
else
{
handle_column = handle.type->createColumn();
version_column = VERSION_COLUMN_TYPE->createColumn();
}

// Refresh the aliasing raw pointers after the assignments above.
handle_column_raw = const_cast<IColumn *>(handle_column.get());
version_column_raw = const_cast<IColumn *>(version_column.get());
}

// Exchange contents with `other`; raw pointers travel with their columns,
// so the aliasing invariant is preserved on both sides.
void swap(PKColumns & other)
{
handle_column.swap(other.handle_column);
version_column.swap(other.version_column);

std::swap(handle_column_raw, other.handle_column_raw);
std::swap(version_column_raw, other.version_column_raw);
}

// Append all rows of `block`'s handle and version columns in place.
// Lazily creates the columns on first use so a default-constructed
// PKColumns can be appended to directly.
void append(const Block & block, const ColumnDefine & handle)
{
if (!handle_column)
{
handle_column = handle.type->createColumn();
version_column = VERSION_COLUMN_TYPE->createColumn();

handle_column_raw = const_cast<IColumn *>(handle_column.get());
version_column_raw = const_cast<IColumn *>(version_column.get());
}
// In-place growth through the cached raw pointers (see hack note above).
handle_column_raw->insertRangeFrom(*block.getByName(handle.name).column, 0, block.rows());
version_column_raw->insertRangeFrom(*block.getByName(VERSION_COLUMN_NAME).column, 0, block.rows());
}

// Package the two columns into a Block (handle first, then version),
// reusing the shared column pointers — no data copy.
Block toBlock(const ColumnDefine & handle) const
{
return {createColumnWithTypeAndName(handle_column, handle.type, handle.name, handle.id),
createColumnWithTypeAndName(version_column, VERSION_COLUMN_TYPE, VERSION_COLUMN_NAME, VERSION_COLUMN_ID)};
}

// Row count, taken from the handle column (the two columns are kept the
// same length by append()). Precondition: handle_column is non-null.
size_t rows() const { return handle_column->size(); }

// Truthiness == "columns have been created" (see lazy init in append()).
explicit operator bool() { return bool(handle_column); }
bool operator!() { return !bool(handle_column); }
};

class DiskValueSpace
{
public:
Expand Down Expand Up @@ -175,8 +94,6 @@ class DiskValueSpace
class DVSBlockInputStream;
BlockInputStreamPtr getInputStream(const ColumnDefines & read_columns, PageStorage & data_storage);

// const PKColumns & getPKColumns(const ColumnDefine & handle, PageStorage & data_storage);

bool tryFlushCache(const OpContext & context, bool force = false);

size_t num_rows();
Expand All @@ -190,8 +107,6 @@ class DiskValueSpace
const Chunks & getChunks() { return chunks; }

private:
// void ensurePKColumns(const ColumnDefine & handle, PageStorage & data_storage);

bool doFlushCache(const OpContext & context);

size_t rowsFromBack(size_t chunks);
Expand All @@ -207,8 +122,6 @@ class DiskValueSpace
PageId page_id;
Chunks chunks;

// PKColumns pk_columns;

// The cache is mainly used to merge fragment chunks.
MutableColumnMap cache;
size_t cache_chunks = 0;
Expand Down

0 comments on commit fb87fd4

Please sign in to comment.