
Commit

Merge branch 'master' into push_dowm_limit_sample
bright-starry-sky authored Sep 13, 2021
2 parents 8fa52a8 + b0908fc commit 34c7c09
Showing 14 changed files with 100 additions and 41 deletions.
2 changes: 1 addition & 1 deletion CMakeLists.txt
@@ -96,7 +96,7 @@ nebula_add_subdirectory(scripts)
include(CPackage)
package(
${ENABLE_PACK_ONE}
"nebula"
"nebula-graph"
"https://github.com/vesoft-inc/nebula/releases"
${CMAKE_SOURCE_DIR}/package
)
2 changes: 2 additions & 0 deletions conf/nebula-graphd.conf.default
@@ -33,6 +33,8 @@
# Whether to treat partial success as an error.
# This flag is only used for Read-only access, and Modify access always treats partial success as an error.
--accept_partial_success=false
# Maximum sentence length, in bytes
--max_allowed_query_size=4194304

########## networking ##########
# Comma separated Meta Server Addresses
2 changes: 2 additions & 0 deletions conf/nebula-graphd.conf.production
@@ -31,6 +31,8 @@
# Whether to treat partial success as an error.
# This flag is only used for Read-only access, and Modify access always treats partial success as an error.
--accept_partial_success=false
# Maximum sentence length, in bytes
--max_allowed_query_size=4194304

########## networking ##########
# Comma separated Meta Server Addresses
4 changes: 2 additions & 2 deletions src/common/expression/CaseExpression.cpp
@@ -64,10 +64,10 @@ const Value& CaseExpression::eval(ExpressionContext& ctx) {
return result_;
}
} else {
if (!when.isBool()) {
if (!when.isBool() && !when.isNull()) {
return Value::kNullBadType;
}
if (when.getBool()) {
if (when.isBool() && when.getBool()) {
result_ = whenThen.then->eval(ctx);
return result_;
}
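With this change a WHEN condition that evaluates to null no longer raises a bad-type error; it simply fails to match and evaluation falls through to the next arm or the ELSE branch (see the TCK cases added further down). A minimal standalone C++ sketch of that control flow — `WhenThen` and `evalCase` are illustrative stand-ins, not nebula's types:

```cpp
#include <cassert>
#include <optional>
#include <vector>

// Stand-in for one CASE arm: a tri-state WHEN condition (null / false / true)
// and the value its THEN branch would produce.
struct WhenThen {
  std::optional<bool> when;  // std::nullopt models a null condition
  int then;
};

// Mirrors the patched eval(): a null condition is skipped instead of being
// rejected, and only a condition that is actually true selects its branch.
int evalCase(const std::vector<WhenThen>& arms, int defaultValue) {
  for (const auto& arm : arms) {
    if (arm.when.has_value() && *arm.when) {  // was: unconditional getBool()
      return arm.then;
    }
    // null (nullopt) or false: fall through to the next WHEN / the ELSE value
  }
  return defaultValue;
}

int main() {
  // CASE WHEN null THEN 0 ELSE 1 END  ->  1  (matches the TCK case below)
  assert(evalCase({{std::nullopt, 0}}, 1) == 1);
  // CASE WHEN true THEN 0 ELSE 1 END  ->  0
  assert(evalCase({{true, 0}}, 1) == 0);
  return 0;
}
```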
1 change: 1 addition & 0 deletions src/graph/service/GraphFlags.cpp
@@ -45,6 +45,7 @@ DEFINE_string(auth_type,

DEFINE_string(cloud_http_url, "", "cloud http url including ip, port, url path");
DEFINE_uint32(max_allowed_statements, 512, "Max allowed sequential statements");
DEFINE_uint32(max_allowed_query_size, 4194304, "Max allowed sequential query size");

DEFINE_int64(max_allowed_connections,
std::numeric_limits<int64_t>::max(),
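The new flag is wired up in the usual gflags way: one translation unit owns the DEFINE_uint32, and consumers (GQLParser.h below) add a DECLARE_uint32 and read FLAGS_max_allowed_query_size. A minimal two-file sketch of that pattern, independent of the nebula sources:

```cpp
// flags.cpp -- the owning translation unit defines the flag once.
#include <gflags/gflags.h>

DEFINE_uint32(max_allowed_query_size, 4194304, "Max allowed sequential query size");

// consumer.cpp -- any other file declares the flag and reads FLAGS_<name>.
#include <gflags/gflags.h>

DECLARE_uint32(max_allowed_query_size);

// Returns true when a query exceeds the configured limit (bytes).
bool queryTooLarge(size_t querySize) {
  return querySize > static_cast<size_t>(FLAGS_max_allowed_query_size);
}
```

The config-file entry (--max_allowed_query_size=4194304 above) and a command-line override both feed the same FLAGS_ variable.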
21 changes: 21 additions & 0 deletions src/graph/validator/test/QueryValidatorTest.cpp
@@ -8,6 +8,7 @@
#include "graph/validator/test/ValidatorTestBase.h"

DECLARE_uint32(max_allowed_statements);
DECLARE_uint32(max_allowed_query_size);

namespace nebula {
namespace graph {
@@ -1118,6 +1119,26 @@ TEST_F(QueryValidatorTest, TestMaxAllowedStatements) {
"exceeded");
}

TEST_F(QueryValidatorTest, TestMaxAllowedQuerySize) {
FLAGS_max_allowed_query_size = 256;
std::string query = "INSERT VERTEX person(name, age) VALUES ";
std::string value = "\"person_1\":(\"person_1\", 1),";
int count = (FLAGS_max_allowed_query_size - query.size()) / value.size();
std::string values;
values.reserve(FLAGS_max_allowed_query_size);
for (int i = 0; i < count; ++i) {
values.append(value);
}
values.erase(values.size() - 1);
query += values;
EXPECT_TRUE(checkResult(query));
query.append(",\"person_2\":(\"person_2\", 2);");
auto result = checkResult(query);
EXPECT_FALSE(result);
EXPECT_EQ(std::string(result.message()), "SyntaxError: Query is too large (282 > 256).");
FLAGS_max_allowed_query_size = 4194304;
}

TEST_F(QueryValidatorTest, TestMatch) {
{
std::string query =
1 change: 1 addition & 0 deletions src/graph/visitor/DeduceTypeVisitor.cpp
@@ -34,6 +34,7 @@ static const std::unordered_map<Value::Type, Value> kConstantValues = {
{Value::Type::STRING, Value("123")},
{Value::Type::DATE, Value(Date())},
{Value::Type::DATETIME, Value(DateTime())},
{Value::Type::TIME, Value(Time())},
{Value::Type::VERTEX, Value(Vertex())},
{Value::Type::EDGE, Value(Edge())},
{Value::Type::PATH, Value(Path())},
9 changes: 7 additions & 2 deletions src/parser/GQLParser.h
@@ -11,6 +11,7 @@
#include "parser/GraphParser.hpp"
#include "parser/GraphScanner.h"

DECLARE_uint32(max_allowed_query_size);
namespace nebula {

class GQLParser {
@@ -39,8 +40,12 @@ class GQLParser {
}

StatusOr<std::unique_ptr<Sentence>> parse(std::string query) {
// Since GraphScanner needs a writable buffer, we have to copy the query
// string
// Since GraphScanner needs a writable buffer, we have to copy the query string
size_t querySize = query.size();
size_t maxAllowedQuerySize = static_cast<size_t>(FLAGS_max_allowed_query_size);
if (querySize > maxAllowedQuerySize) {
return Status::SyntaxError("Query is too large (%ld > %ld).", querySize, maxAllowedQuerySize);
}
buffer_ = std::move(query);
pos_ = &buffer_[0];
end_ = pos_ + buffer_.size();
3 changes: 2 additions & 1 deletion src/storage/admin/AdminTaskManager.cpp
@@ -15,7 +15,8 @@ namespace storage {

bool AdminTaskManager::init() {
LOG(INFO) << "max concurrent subtasks: " << FLAGS_max_concurrent_subtasks;
pool_ = std::make_unique<ThreadPool>(FLAGS_max_concurrent_subtasks);
auto threadFactory = std::make_shared<folly::NamedThreadFactory>("TaskManager");
pool_ = std::make_unique<ThreadPool>(FLAGS_max_concurrent_subtasks, threadFactory);
bgThread_ = std::make_unique<thread::GenericWorker>();
if (!bgThread_->start()) {
LOG(ERROR) << "background thread start failed";
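Passing a folly::NamedThreadFactory gives the pool's worker threads a recognizable prefix ("TaskManager0", "TaskManager1", …) in ps/top/gdb output. A minimal sketch of the same construction, assuming the ThreadPool alias refers to a folly executor such as IOThreadPoolExecutor (an assumption — the alias itself is not shown in this diff):

```cpp
#include <memory>

#include <folly/executors/IOThreadPoolExecutor.h>
#include <folly/executors/thread_factory/NamedThreadFactory.h>

int main() {
  // Worker threads created by this executor are named "TaskManager0",
  // "TaskManager1", ... which makes them easy to spot when debugging.
  auto threadFactory = std::make_shared<folly::NamedThreadFactory>("TaskManager");
  auto pool = std::make_unique<folly::IOThreadPoolExecutor>(4, threadFactory);

  pool->add([] { /* subtask body */ });
  pool->join();  // drain queued work before shutting down
  return 0;
}
```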
35 changes: 15 additions & 20 deletions src/storage/admin/RebuildEdgeIndexTask.cpp
@@ -44,6 +44,13 @@ nebula::cpp2::ErrorCode RebuildEdgeIndexTask::buildIndexGlobal(GraphSpaceID spac
edgeTypes.emplace(item->get_schema_id().get_edge_type());
}

auto schemasRet = env_->schemaMan_->getAllLatestVerEdgeSchema(space);
if (!schemasRet.ok()) {
LOG(ERROR) << "Get space edge schema failed";
return nebula::cpp2::ErrorCode::E_EDGE_NOT_FOUND;
}
auto schemas = schemasRet.value();

auto vidSize = vidSizeRet.value();
std::unique_ptr<kvstore::KVIterator> iter;
const auto& prefix = NebulaKeyUtils::edgePrefix(part);
@@ -53,9 +60,6 @@
return ret;
}

VertexID currentSrcVertex = "";
VertexID currentDstVertex = "";
EdgeRanking currentRanking = 0;
std::vector<kvstore::KV> data;
data.reserve(kReserveNum);
RowReaderWrapper reader;
@@ -67,7 +71,7 @@
}

if (batchSize >= FLAGS_rebuild_index_batch_size) {
auto result = writeData(space, part, data, batchSize, rateLimiter);
auto result = writeData(space, part, std::move(data), batchSize, rateLimiter);
if (result != nebula::cpp2::ErrorCode::SUCCEEDED) {
LOG(ERROR) << "Write Part " << part << " Index Failed";
return result;
@@ -97,15 +101,6 @@ nebula::cpp2::ErrorCode RebuildEdgeIndexTask::buildIndexGlobal(GraphSpaceID spac
auto ranking = NebulaKeyUtils::getRank(vidSize, key);
VLOG(3) << "Source " << source << " Destination " << destination << " Ranking " << ranking
<< " Edge Type " << edgeType;
if (currentSrcVertex == source && currentDstVertex == destination &&
currentRanking == ranking) {
iter->next();
continue;
} else {
currentSrcVertex = source.toString();
currentDstVertex = destination.toString();
currentRanking = ranking;
}

reader = RowReaderWrapper::getEdgePropReader(env_->schemaMan_, space, edgeType, val);
if (reader == nullptr) {
@@ -114,17 +109,17 @@ nebula::cpp2::ErrorCode RebuildEdgeIndexTask::buildIndexGlobal(GraphSpaceID spac
continue;
}

auto schema = env_->schemaMan_->getEdgeSchema(space, edgeType);
if (!schema) {
auto schemaIter = schemas.find(edgeType);
if (schemaIter == schemas.end()) {
LOG(WARNING) << "Space " << space << ", edge " << edgeType << " invalid";
iter->next();
continue;
}
auto* schema = schemaIter->second.get();

auto ttlProp = CommonUtils::ttlProps(schema.get());
if (ttlProp.first &&
CommonUtils::checkDataExpiredForTTL(
schema.get(), reader.get(), ttlProp.second.second, ttlProp.second.first)) {
auto ttlProp = CommonUtils::ttlProps(schema);
if (ttlProp.first && CommonUtils::checkDataExpiredForTTL(
schema, reader.get(), ttlProp.second.second, ttlProp.second.first)) {
VLOG(3) << "ttl expired : "
<< "Source " << source << " Destination " << destination << " Ranking " << ranking
<< " Edge Type " << edgeType;
@@ -134,7 +129,7 @@ nebula::cpp2::ErrorCode RebuildEdgeIndexTask::buildIndexGlobal(GraphSpaceID spac

std::string indexVal = "";
if (ttlProp.first) {
auto ttlValRet = CommonUtils::ttlValue(schema.get(), reader.get());
auto ttlValRet = CommonUtils::ttlValue(schema, reader.get());
if (ttlValRet.ok()) {
indexVal = IndexKeyUtils::indexVal(std::move(ttlValRet).value());
}
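Besides dropping the duplicate-edge short-circuit, the rebuild loop now resolves all latest-version edge schemas once before the scan and does a per-row map lookup, instead of calling getEdgeSchema for every key. A standalone sketch of that hoisting pattern with illustrative types (not the storage API):

```cpp
#include <cstdint>
#include <iostream>
#include <memory>
#include <string>
#include <unordered_map>
#include <vector>

// Illustrative stand-ins; the real code uses nebula's schema manager types.
struct Schema {
  std::string name;
};
using EdgeType = int32_t;

// Imagine this being comparatively expensive; the old loop paid a schema
// lookup for every scanned key.
std::unordered_map<EdgeType, std::shared_ptr<Schema>> fetchAllLatestEdgeSchemas() {
  return {{101, std::make_shared<Schema>(Schema{"likes"})},
          {102, std::make_shared<Schema>(Schema{"follows"})}};
}

int main() {
  // Hoisted: resolve every schema once, before the scan ...
  auto schemas = fetchAllLatestEdgeSchemas();

  std::vector<EdgeType> scannedRows{101, 102, 101, 999};
  for (EdgeType edgeType : scannedRows) {
    // ... then a cheap map lookup per row; unknown types are skipped,
    // just as the task logs a warning and continues.
    auto it = schemas.find(edgeType);
    if (it == schemas.end()) {
      std::cout << "skip invalid edge type " << edgeType << "\n";
      continue;
    }
    const Schema* schema = it->second.get();
    std::cout << "index row for edge " << schema->name << "\n";
  }
  return 0;
}
```

The RebuildTagIndexTask change below applies the same pattern to tag schemas.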
30 changes: 15 additions & 15 deletions src/storage/admin/RebuildTagIndexTask.cpp
@@ -44,6 +44,13 @@ nebula::cpp2::ErrorCode RebuildTagIndexTask::buildIndexGlobal(GraphSpaceID space
tagIds.emplace(item->get_schema_id().get_tag_id());
}

auto schemasRet = env_->schemaMan_->getAllLatestVerTagSchema(space);
if (!schemasRet.ok()) {
LOG(ERROR) << "Get space tag schema failed";
return nebula::cpp2::ErrorCode::E_TAG_NOT_FOUND;
}
auto schemas = schemasRet.value();

auto vidSize = vidSizeRet.value();
std::unique_ptr<kvstore::KVIterator> iter;
auto prefix = NebulaKeyUtils::vertexPrefix(part);
@@ -53,7 +60,6 @@
return ret;
}

VertexID currentVertex = "";
std::vector<kvstore::KV> data;
data.reserve(kReserveNum);
RowReaderWrapper reader;
@@ -65,7 +71,7 @@ nebula::cpp2::ErrorCode RebuildTagIndexTask::buildIndexGlobal(GraphSpaceID space
}

if (batchSize >= FLAGS_rebuild_index_batch_size) {
auto result = writeData(space, part, data, batchSize, rateLimiter);
auto result = writeData(space, part, std::move(data), batchSize, rateLimiter);
if (result != nebula::cpp2::ErrorCode::SUCCEEDED) {
LOG(ERROR) << "Write Part " << part << " Index Failed";
return result;
@@ -88,30 +94,24 @@

auto vertex = NebulaKeyUtils::getVertexId(vidSize, key);
VLOG(3) << "Tag ID " << tagID << " Vertex ID " << vertex;
if (currentVertex == vertex) {
iter->next();
continue;
} else {
currentVertex = vertex.toString();
}

reader = RowReaderWrapper::getTagPropReader(env_->schemaMan_, space, tagID, val);
if (reader == nullptr) {
iter->next();
continue;
}

auto schema = env_->schemaMan_->getTagSchema(space, tagID);
if (!schema) {
auto schemaIter = schemas.find(tagID);
if (schemaIter == schemas.end()) {
LOG(WARNING) << "Space " << space << ", tag " << tagID << " invalid";
iter->next();
continue;
}
auto* schema = schemaIter->second.get();

auto ttlProp = CommonUtils::ttlProps(schema.get());
if (ttlProp.first &&
CommonUtils::checkDataExpiredForTTL(
schema.get(), reader.get(), ttlProp.second.second, ttlProp.second.first)) {
auto ttlProp = CommonUtils::ttlProps(schema);
if (ttlProp.first && CommonUtils::checkDataExpiredForTTL(
schema, reader.get(), ttlProp.second.second, ttlProp.second.first)) {
VLOG(3) << "ttl expired : "
<< "Tag ID " << tagID << " Vertex ID " << vertex;
iter->next();
@@ -120,7 +120,7 @@

std::string indexVal = "";
if (ttlProp.first) {
auto ttlValRet = CommonUtils::ttlValue(schema.get(), reader.get());
auto ttlValRet = CommonUtils::ttlValue(schema, reader.get());
if (ttlValRet.ok()) {
indexVal = IndexKeyUtils::indexVal(std::move(ttlValRet).value());
}
7 changes: 7 additions & 0 deletions tests/tck/features/aggregate/Agg.feature
@@ -11,6 +11,13 @@
Then the result should be, in any order, with relax comparison:
| COUNT(*) | (1+1) |
| 1 | 2 |
When executing query:
"""
YIELD COUNT(CASE WHEN null THEN null ELSE 1 END) AS nulls
"""
Then the result should be, in any order, with relax comparison:
| nulls |
| 1 |
When executing query:
"""
YIELD COUNT(*)+1 ,1+2 ,(INT)abs(count(2))
7 changes: 7 additions & 0 deletions tests/tck/features/expression/Case.feature
@@ -36,6 +36,13 @@
Then the result should be, in any order:
| r |
| 1 |
When executing query:
"""
YIELD CASE WHEN null THEN 0 ELSE 1 END AS r
"""
Then the result should be, in any order:
| r |
| 1 |

Scenario: yield conditional case
When executing query:
17 changes: 17 additions & 0 deletions tests/tck/features/expression/FunctionCall.feature
@@ -121,3 +121,20 @@
Then the result should be, in any order:
| result |
| NULL |

Scenario: error check
When executing query:
"""
RETURN timestamp("2000-10-10T10:00:00") + true
"""
Then a SemanticError should be raised at runtime: `(timestamp("2000-10-10T10:00:00")+true)' is not a valid expression, can not apply `+' to `INT' and `BOOL'.
When executing query:
"""
RETURN time("10:00:00") + 3
"""
Then a SemanticError should be raised at runtime: `(time("10:00:00")+3)' is not a valid expression, can not apply `+' to `TIME' and `INT'.
When executing query:
"""
RETURN datetime("2000-10-10T10:00:00") + 3
"""
Then a SemanticError should be raised at runtime: `(datetime("2000-10-10T10:00:00")+3)' is not a valid expression, can not apply `+' to `DATETIME' and `INT'.
