From 0475394879a5079397b60959f9201389f87c5386 Mon Sep 17 00:00:00 2001 From: morningman Date: Mon, 4 Sep 2017 15:43:19 +0800 Subject: [PATCH 1/2] add new stmt ALTER SYSTEM ADD FREE BACKEND to add a backend not belongs to any cluster --- build.sh | 6 +- .../Contents/Administration/admin_stmt.md | 15 +++-- .../Data Manipulation/manipulation_stmt.md | 9 +++ .../com/baidu/palo/alter/SystemHandler.java | 2 +- .../baidu/palo/analysis/AddBackendClause.java | 15 ++++- fe/src/com/baidu/palo/catalog/Catalog.java | 63 ++++++++++--------- fe/src/com/baidu/palo/cluster/Cluster.java | 33 +++++----- .../baidu/palo/system/SystemInfoService.java | 41 +++++++++--- gensrc/parser/sql_parser.y | 20 +++--- gensrc/parser/sql_scanner.flex | 1 + 10 files changed, 134 insertions(+), 71 deletions(-) diff --git a/build.sh b/build.sh index 0a71d23e4120b1..f32fc455139848 100755 --- a/build.sh +++ b/build.sh @@ -78,9 +78,9 @@ if [ ${CLEAN_ALL} -eq 1 ]; then fi mkdir -p ${PALO_HOME}/be/build/ cd ${PALO_HOME}/be/build/ -cmake ../ -make -j${PARALLEL} -make install +#cmake ../ +#make -j${PARALLEL} +#make install cd ${PALO_HOME} # Build docs, should be built before Frontend diff --git a/docs/help/Contents/Administration/admin_stmt.md b/docs/help/Contents/Administration/admin_stmt.md index 52b2592711b158..fa65d1374367ea 100644 --- a/docs/help/Contents/Administration/admin_stmt.md +++ b/docs/help/Contents/Administration/admin_stmt.md @@ -1,10 +1,12 @@ # ALTER SYSTEM ## description - 该语句用于操作一个集群内的节点。(仅管理员使用!) + 该语句用于操作一个系统内的节点。(仅管理员使用!) 语法: 1) 增加节点 ALTER SYSTEM ADD BACKEND "host:heartbeat_port"[,"host:heartbeat_port"...]; + 2) 增加空闲节点 + ALTER SYSTEM ADD FREE BACKEND "host:heartbeat_port"[,"host:heartbeat_port"...]; 2) 删除节点 ALTER SYSTEM DROP BACKEND "host:heartbeat_port"[,"host:heartbeat_port"...]; 3) 节点下线 @@ -27,18 +29,21 @@ 1. 增加一个节点 ALTER SYSTEM ADD BACKEND "host:port"; + + 2. 增加一个空闲节点 + ALTER SYSTEM ADD FREE BACKEND "host:port"; - 2. 删除两个节点 + 3. 
删除两个节点 ALTER SYSTEM DROP BACKEND "host1:port", "host2:port"; - 3. 下线两个节点 + 4. 下线两个节点 ALTER SYSTEM DECOMMISSION BACKEND "host1:port", "host2:port"; - 4. 增加两个Hdfs Broker + 5. 增加两个Hdfs Broker ALTER SYSTEM ADD BROKER hdfs "host1:port", "host2:port"; ## keyword - ALTER,SYSTEM,BACKEND,BROKER + ALTER,SYSTEM,BACKEND,BROKER,FREE # CANCEL ALTER SYSTEM ## description diff --git a/docs/help/Contents/Data Manipulation/manipulation_stmt.md b/docs/help/Contents/Data Manipulation/manipulation_stmt.md index 7a989d6c6f87fb..cb984a4cfd3723 100644 --- a/docs/help/Contents/Data Manipulation/manipulation_stmt.md +++ b/docs/help/Contents/Data Manipulation/manipulation_stmt.md @@ -762,3 +762,12 @@ ## keyword SHOW, RESTORE + +# SHOW BACKENDS +## description + 该语句用于查看cluster内的节点 + 语法: + SHOW BACKENDS + +## keyword + SHOW, BACKENDS \ No newline at end of file diff --git a/fe/src/com/baidu/palo/alter/SystemHandler.java b/fe/src/com/baidu/palo/alter/SystemHandler.java index 43a0fc7cb3c67b..f249708c06a7df 100644 --- a/fe/src/com/baidu/palo/alter/SystemHandler.java +++ b/fe/src/com/baidu/palo/alter/SystemHandler.java @@ -145,7 +145,7 @@ public synchronized void process(List alterClauses, String clusterN if (alterClause instanceof AddBackendClause) { AddBackendClause addBackendClause = (AddBackendClause) alterClause; - Catalog.getCurrentSystemInfo().addBackends(addBackendClause.getHostPortPairs()); + Catalog.getCurrentSystemInfo().addBackends(addBackendClause.getHostPortPairs(), addBackendClause.isFree()); } else if (alterClause instanceof DropBackendClause) { DropBackendClause dropBackendClause = (DropBackendClause) alterClause; if (!dropBackendClause.isForce()) { diff --git a/fe/src/com/baidu/palo/analysis/AddBackendClause.java b/fe/src/com/baidu/palo/analysis/AddBackendClause.java index 97dec2687b1d8b..0af5daa13c2e8f 100644 --- a/fe/src/com/baidu/palo/analysis/AddBackendClause.java +++ b/fe/src/com/baidu/palo/analysis/AddBackendClause.java @@ -19,8 +19,12 @@ public class AddBackendClause 
extends BackendClause {
 
-    public AddBackendClause(List hostPorts) {
+    // a backend in free state is not owned by any cluster
+    protected boolean isFree;
+
+    public AddBackendClause(List hostPorts, boolean isFree) {
         super(hostPorts);
+        this.isFree = isFree;
     }
 
     @Override
@@ -35,4 +39,13 @@ public String toSql() {
         }
         return sb.toString();
     }
+
+
+    public void setFree(boolean isFree) {
+        this.isFree = isFree;
+    }
+
+    public boolean isFree() {
+        return this.isFree;
+    }
 }
diff --git a/fe/src/com/baidu/palo/catalog/Catalog.java b/fe/src/com/baidu/palo/catalog/Catalog.java
index f1f9f4c931713e..0ddcc37ebd3b26 100644
--- a/fe/src/com/baidu/palo/catalog/Catalog.java
+++ b/fe/src/com/baidu/palo/catalog/Catalog.java
@@ -4422,14 +4422,6 @@ public void createCluster(CreateClusterStmt stmt) throws DdlException {
     }
 
     private void unprotectCreateCluster(Cluster cluster) {
-        if (cluster.getName().equalsIgnoreCase(SystemInfoService.DEFAULT_CLUSTER)) {
-            if (cluster.getBackendIdList().isEmpty()) {
-                isDefaultClusterCreated = true;
-                // ignore default_cluster
-                return;
-            }
-        }
-
         final Iterator iterator = cluster.getBackendIdList().iterator();
         while (iterator.hasNext()) {
             final Long id = iterator.next();
@@ -4921,12 +4913,24 @@ public Set getMigrations() {
 
     public long loadCluster(DataInputStream dis, long checksum) throws IOException, DdlException {
         if (Catalog.getCurrentCatalogJournalVersion() >= FeMetaVersion.VERSION_30) {
-            int dbCount = dis.readInt();
-            checksum ^= dbCount;
-            for (long i = 0; i < dbCount; ++i) {
+            int clusterCount = dis.readInt();
+            checksum ^= clusterCount;
+            for (long i = 0; i < clusterCount; ++i) {
                 final Cluster cluster = new Cluster();
                 cluster.readFields(dis);
                 checksum ^= cluster.getId();
+
+                // A BE is put into default_cluster when added, therefore it is possible that the BEs
+                // in default_cluster are not the latest, because the cluster can't be updated when
+                // loadCluster runs after loadBackend.
+                List latestBackendIds = systemInfo.getClusterBackendIds(cluster.getName());
+                if (cluster.getName().equalsIgnoreCase(SystemInfoService.DEFAULT_CLUSTER)) {
+                    cluster.setBackendIdList(latestBackendIds);
+                } else {
+                    // the cluster must have the same number of BEs as SystemInfoService records
+                    Preconditions.checkState(latestBackendIds.size() == cluster.getBackendIdList().size());
+                }
+
                 final InfoSchemaDb db = new InfoSchemaDb(cluster.getName());
                 db.setClusterName(cluster.getName());
                 idToDb.put(db.getId(), db);
@@ -4951,28 +4955,27 @@ private void initDefaultCluster() {
         cluster.setName(SystemInfoService.DEFAULT_CLUSTER);
         cluster.setId(id);
 
-        if (backendList.size() != 0) {
-            // make sure one host hold only one backend.
-            Set beHost = Sets.newHashSet();
-            for (Backend be : defaultClusterBackends) {
-                if (beHost.contains(be.getHost())) {
-                    // we can not handle this situation automatically.
-                    LOG.error("found more than one backends in same host: {}", be.getHost());
-                    System.exit(-1);
-                } else {
-                    beHost.add(be.getHost());
-                }
+        // make sure one host hold only one backend.
+        Set beHost = Sets.newHashSet();
+        for (Backend be : defaultClusterBackends) {
+            if (beHost.contains(be.getHost())) {
+                // we can not handle this situation automatically.
+                LOG.error("found more than one backends in same host: {}", be.getHost());
+                System.exit(-1);
+            } else {
+                beHost.add(be.getHost());
             }
+        }
 
-            // we create default_cluster only if we had existing backends.
-            // this could only happend when we upgrade Palo from version 2.4 to 3.x.
-            cluster.setBackendIdList(backendList);
-            unprotectCreateCluster(cluster);
-            for (Database db : idToDb.values()) {
-                db.setClusterName(SystemInfoService.DEFAULT_CLUSTER);
-                cluster.addDb(db.getFullName(), db.getId());
-            }
+        // we create default_cluster to meet the need for ease of use, because
+        // most users have no multi-tenant needs.
+ cluster.setBackendIdList(backendList); + unprotectCreateCluster(cluster); + for (Database db : idToDb.values()) { + db.setClusterName(SystemInfoService.DEFAULT_CLUSTER); + cluster.addDb(db.getFullName(), db.getId()); } + // no matter default_cluster is created or not, // mark isDefaultClusterCreated as true isDefaultClusterCreated = true; diff --git a/fe/src/com/baidu/palo/cluster/Cluster.java b/fe/src/com/baidu/palo/cluster/Cluster.java index 0c798b0788290b..92daecf226f076 100644 --- a/fe/src/com/baidu/palo/cluster/Cluster.java +++ b/fe/src/com/baidu/palo/cluster/Cluster.java @@ -51,7 +51,7 @@ public class Cluster implements Writable { private Long id; private String name; // backend which cluster own - private List backendIdList; + private Set backendIdSet; private Set userIdSet; private Set userNameSet; @@ -66,7 +66,7 @@ public class Cluster implements Writable { public Cluster() { this.rwLock = new ReentrantReadWriteLock(true); - this.backendIdList = new LinkedList(); + this.backendIdSet = Sets.newHashSet(); this.userIdSet = Sets.newHashSet(); this.userNameSet = Sets.newHashSet(); this.linkDbNames = Maps.newHashMap(); @@ -79,7 +79,7 @@ public Cluster(String name, long id) { this.name = name; this.id = id; this.rwLock = new ReentrantReadWriteLock(true); - this.backendIdList = new LinkedList(); + this.backendIdSet = Sets.newHashSet(); this.userIdSet = Sets.newHashSet(); this.userNameSet = Sets.newHashSet(); this.linkDbNames = Maps.newHashMap(); @@ -230,36 +230,36 @@ public boolean isWriteLockHeldByCurrentThread() { } public int getClusterCapacity() { - return backendIdList.size(); + return backendIdSet.size(); } public List getBackendIdList() { - return Lists.newArrayList(backendIdList); + return Lists.newArrayList(backendIdSet); } public void setBackendIdList(List backendIdList) { writeLock(); try { - this.backendIdList = backendIdList; + this.backendIdSet = Sets.newHashSet(backendIdList); } finally { writeUnlock(); } } - public void addBackend(long id) { + 
public void addBackend(long backendId) { writeLock(); try { - this.backendIdList.add(id); + this.backendIdSet.add(backendId); } finally { writeUnlock(); } } - public void addBackends(List backends) { + public void addBackends(List backendIds) { writeLock(); try { - this.backendIdList.addAll(backends); - } finally { + this.backendIdSet.addAll(backendIds); + } finally { writeUnlock(); } } @@ -285,8 +285,8 @@ public void write(DataOutput out) throws IOException { out.writeLong(id); Text.writeString(out, name); - out.writeLong(backendIdList.size()); - for (Long id : backendIdList) { + out.writeLong(backendIdSet.size()); + for (Long id : backendIdSet) { out.writeLong(id); } @@ -329,9 +329,8 @@ public void readFields(DataInput in) throws IOException { Long len = in.readLong(); while (len-- > 0) { Long id = in.readLong(); - backendIdList.add(id); + backendIdSet.add(id); } - int count = in.readInt(); while (count-- > 0) { dbNames.add(Text.readString(in)); @@ -362,7 +361,7 @@ public void readFields(DataInput in) throws IOException { public void removeBackend(long removedBackendId) { writeLock(); try { - backendIdList.remove((Long)removedBackendId); + backendIdSet.remove((Long)removedBackendId); } finally { writeUnlock(); } @@ -371,7 +370,7 @@ public void removeBackend(long removedBackendId) { public void removeBackends(List removedBackendIds) { writeLock(); try { - backendIdList.remove(removedBackendIds); + backendIdSet.remove(removedBackendIds); } finally { writeUnlock(); } diff --git a/fe/src/com/baidu/palo/system/SystemInfoService.java b/fe/src/com/baidu/palo/system/SystemInfoService.java index 119addc609b839..59655126a43409 100644 --- a/fe/src/com/baidu/palo/system/SystemInfoService.java +++ b/fe/src/com/baidu/palo/system/SystemInfoService.java @@ -56,6 +56,7 @@ import com.baidu.palo.thrift.TNetworkAddress; import com.baidu.palo.thrift.TStatusCode; import com.google.common.base.Strings; +import com.google.common.base.Preconditions; import 
com.google.common.collect.ImmutableMap; import com.google.common.collect.Iterators; import com.google.common.collect.Lists; @@ -128,7 +129,7 @@ public void setMaster(String masterHost, int masterPort, int clusterId, long epo masterInfo.set(tMasterInfo); } - public void addBackends(List> hostPortPairs) throws DdlException { + public void addBackends(List> hostPortPairs, boolean isFree) throws DdlException { for (Pair pair : hostPortPairs) { // check is already exist if (getBackendWithHeartbeatPort(pair.first, pair.second) != null) { @@ -137,7 +138,7 @@ public void addBackends(List> hostPortPairs) throws DdlExc } for (Pair pair : hostPortPairs) { - addBackend(pair.first, pair.second); + addBackend(pair.first, pair.second, isFree); } } @@ -149,7 +150,7 @@ public void addBackend(Backend backend) { idToBackendRef.set(newIdToBackend); } - private void addBackend(String host, int heartbeatPort) throws DdlException { + private void addBackend(String host, int heartbeatPort, boolean isFree) throws DdlException { Backend newBackend = new Backend(Catalog.getInstance().getNextId(), host, heartbeatPort); // update idToBackend Map copiedBackends = Maps.newHashMap(idToBackendRef.get()); @@ -170,8 +171,17 @@ private void addBackend(String host, int heartbeatPort) throws DdlException { copiedHeartbeatHandlersMap.put(newBackend.getId(), heartbeatHandler); ImmutableMap newIdToHeartbeatHandler = ImmutableMap.copyOf(copiedHeartbeatHandlersMap); idToHeartbeatHandlerRef.set(newIdToHeartbeatHandler); - - // log + + // to add be to DEFAULT_CLUSTER + if (!isFree) { + final Cluster cluster = Catalog.getInstance().getCluster(DEFAULT_CLUSTER); + Preconditions.checkState(cluster != null); + cluster.addBackend(newBackend.getId()); + newBackend.setOwnerClusterName(DEFAULT_CLUSTER); + newBackend.setBackendState(BackendState.using); + } + + // log Catalog.getInstance().getEditLog().logAddBackend(newBackend); LOG.info("add backend[" + newBackend.getId() + ". 
" + newBackend.getHost() + ":" + newBackend.getHeartbeatPort() + ":" + newBackend.getBePort() + ":" + newBackend.getBePort() + ":" + newBackend.getHttpPort() + "]"); @@ -241,6 +251,8 @@ private void dropBackend(String host, int heartbeatPort) throws DdlException { final Cluster cluster = Catalog.getInstance().getCluster(droppedBackend.getOwnerClusterName()); if (null != cluster) { cluster.removeBackend(droppedBackend.getId()); + } else { + LOG.error("Cluster " + droppedBackend.getOwnerClusterName() + " no exist."); } // log Catalog.getInstance().getEditLog().logDropBackend(droppedBackend); @@ -280,7 +292,7 @@ public Backend getBackendWithHeartbeatPort(String host, int heartPort) { } return null; } - + public Backend getBackendWithBePort(String host, int bePort) { ImmutableMap idToBackend = idToBackendRef.get(); for (Backend backend : idToBackend.values()) { @@ -997,7 +1009,19 @@ public void replayAddBackend(Backend newBackend) { copiedHeartbeatHandlersMap.put(newBackend.getId(), heartbeatHandler); ImmutableMap newIdToHeartbeatHandler = ImmutableMap.copyOf(copiedHeartbeatHandlersMap); idToHeartbeatHandlerRef.set(newIdToHeartbeatHandler); - } + + // to add be to DEFAULT_CLUSTER + if (newBackend.getBackendState() == BackendState.using) { + final Cluster cluster = Catalog.getInstance().getCluster(DEFAULT_CLUSTER); + if (null != cluster) { + // replay log + cluster.addBackend(newBackend.getId()); + } else { + // This happens in loading image when fe is restarted, because loadCluster is after loadBackend, + // cluster is not created. Be in cluster will be updated in loadCluster. 
+ } + } + } public void replayDropBackend(Backend backend) { LOG.debug("replayDropBackend: {}", backend); @@ -1018,10 +1042,13 @@ public void replayDropBackend(Backend backend) { copiedHeartbeatHandlersMap.remove(backend.getId()); ImmutableMap newIdToHeartbeatHandler = ImmutableMap.copyOf(copiedHeartbeatHandlersMap); idToHeartbeatHandlerRef.set(newIdToHeartbeatHandler); + // update cluster final Cluster cluster = Catalog.getInstance().getCluster(backend.getOwnerClusterName()); if (null != cluster) { cluster.removeBackend(backend.getId()); + } else { + LOG.error("Cluster " + backend.getOwnerClusterName() + " no exist."); } } diff --git a/gensrc/parser/sql_parser.y b/gensrc/parser/sql_parser.y index e347c3e8e60d88..9ded411666c18b 100644 --- a/gensrc/parser/sql_parser.y +++ b/gensrc/parser/sql_parser.y @@ -195,21 +195,21 @@ parser code {: // Total keywords of palo terminal String KW_ADD, KW_AFTER, KW_AGGREGATE, KW_ALL, KW_ALTER, KW_AND, KW_ANTI, KW_AS, KW_ASC, KW_AUTHORS, KW_BACKEND, KW_BACKUP, KW_BETWEEN, KW_BEGIN, KW_BIGINT, KW_BOOLEAN, KW_BOTH, KW_BROKER, KW_BACKENDS, KW_BY, - KW_CANCEL, KW_CASE, KW_CAST, KW_CHAIN, KW_CHAR, KW_CHARSET, KW_SYSTEM, KW_CLUSTER, KW_CLUSTERS, KW_LINK, KW_MIGRATE, KW_MIGRATIONS, KW_ENTER, + KW_CANCEL, KW_CASE, KW_CAST, KW_CHAIN, KW_CHAR, KW_CHARSET, KW_CLUSTER, KW_CLUSTERS, KW_COLLATE, KW_COLLATION, KW_COLUMN, KW_COLUMNS, KW_COMMENT, KW_COMMIT, KW_COMMITTED, KW_CONNECTION, KW_CONNECTION_ID, KW_CONSISTENT, KW_COUNT, KW_CREATE, KW_CROSS, KW_CURRENT, KW_CURRENT_USER, KW_DATA, KW_DATABASE, KW_DATABASES, KW_DATE, KW_DATETIME, KW_DECIMAL, KW_DECOMMISSION, KW_DEFAULT, KW_DESC, KW_DESCRIBE, KW_DELETE, KW_DISTINCT, KW_DISTINCTPC, KW_DISTINCTPCSA, KW_DISTRIBUTED, KW_BUCKETS, KW_DIV, KW_DOUBLE, KW_DROP, KW_DROPP, KW_DUPLICATE, - KW_ELSE, KW_END, KW_ENGINE, KW_ENGINES, KW_ERRORS, KW_EVENTS, KW_EXISTS, KW_EXPORT, KW_EXTERNAL, KW_EXTRACT, - KW_FALSE, KW_FOLLOWER, KW_FOLLOWING, KW_FROM, KW_FIRST, KW_FLOAT, KW_FOR, KW_FULL, KW_FUNCTION, + KW_ELSE, 
KW_END, KW_ENGINE, KW_ENGINES, KW_ENTER, KW_ERRORS, KW_EVENTS, KW_EXISTS, KW_EXPORT, KW_EXTERNAL, KW_EXTRACT, + KW_FALSE, KW_FOLLOWER, KW_FOLLOWING, KW_FREE, KW_FROM, KW_FIRST, KW_FLOAT, KW_FOR, KW_FULL, KW_FUNCTION, KW_GLOBAL, KW_GRANT, KW_GROUP, KW_HASH, KW_HAVING, KW_HELP,KW_HLL, KW_HLL_UNION, KW_IDENTIFIED, KW_IF, KW_IN, KW_INDEX, KW_INDEXES, KW_INFILE, KW_INNER, KW_INSERT, KW_INT, KW_INTERVAL, KW_INTO, KW_IS, KW_ISNULL, KW_ISOLATION, KW_JOIN, KW_KEY, KW_KILL, - KW_LABEL, KW_LARGEINT, KW_LEFT, KW_LESS, KW_LEVEL, KW_LIKE, KW_LIMIT, KW_LOAD, KW_LOCAL, - KW_MAX, KW_MAX_VALUE, KW_MERGE, KW_MIN, KW_MODIFY, + KW_LABEL, KW_LARGEINT, KW_LEFT, KW_LESS, KW_LEVEL, KW_LIKE, KW_LIMIT, KW_LINK, KW_LOAD, KW_LOCAL, + KW_MAX, KW_MAX_VALUE, KW_MERGE, KW_MIN, KW_MIGRATE, KW_MIGRATIONS, KW_MODIFY, KW_NAME, KW_NAMES, KW_NEGATIVE, KW_NO, KW_NOT, KW_NULL, KW_OBSERVER, KW_OFFSET, KW_ON, KW_ONLY, KW_OPEN, KW_OR, KW_ORDER, KW_OUTER, KW_OVER, KW_PARTITION, KW_PARTITIONS, KW_PRECEDING, @@ -222,7 +222,7 @@ terminal String KW_ADD, KW_AFTER, KW_AGGREGATE, KW_ALL, KW_ALTER, KW_AND, KW_ANT KW_RIGHT, KW_ROLLBACK, KW_ROLLUP, KW_ROW, KW_ROWS, KW_SELECT, KW_SEMI, KW_SERIALIZABLE, KW_SESSION, KW_SET, KW_SHOW, KW_SMALLINT, KW_SNAPSHOT, KW_SONAME, KW_SPLIT, KW_START, KW_STATUS, KW_STORAGE, KW_STRING, - KW_SUM, KW_SUPERUSER, KW_SYNC, + KW_SUM, KW_SUPERUSER, KW_SYNC, KW_SYSTEM, KW_TABLE, KW_TABLES, KW_TABLET, KW_TERMINATED, KW_THAN, KW_THEN, KW_TIMESTAMP, KW_TINYINT, KW_TO, KW_TRANSACTION, KW_TRIGGERS, KW_TRIM, KW_TRUE, KW_TYPES, KW_UNCOMMITTED, KW_UNBOUNDED, KW_UNION, KW_UNIQUE, KW_UNSIGNED, KW_USE, KW_USER, KW_USING, @@ -680,7 +680,11 @@ alter_table_clause ::= alter_system_clause ::= KW_ADD KW_BACKEND string_list:hostPorts {: - RESULT = new AddBackendClause(hostPorts); + RESULT = new AddBackendClause(hostPorts, false); + :} + | KW_ADD KW_FREE KW_BACKEND string_list:hostPorts + {: + RESULT = new AddBackendClause(hostPorts, true); :} | KW_DROP KW_BACKEND string_list:hostPorts {: @@ -3602,6 +3606,8 @@ 
keyword ::= {: RESULT = id; :} | KW_MAX:id {: RESULT = id; :} + | KW_FREE:id + {: RESULT = id; :} ; // Identifier that contain keyword diff --git a/gensrc/parser/sql_scanner.flex b/gensrc/parser/sql_scanner.flex index 4b5fccd35dce03..1285adacdd780a 100644 --- a/gensrc/parser/sql_scanner.flex +++ b/gensrc/parser/sql_scanner.flex @@ -92,6 +92,7 @@ import com.baidu.palo.common.util.SqlUtils; keywordMap.put("charset", new Integer(SqlParserSymbols.KW_CHARSET)); keywordMap.put("cluster", new Integer(SqlParserSymbols.KW_CLUSTER)); keywordMap.put("clusters", new Integer(SqlParserSymbols.KW_CLUSTERS)); + keywordMap.put("free", new Integer(SqlParserSymbols.KW_FREE)); keywordMap.put("system", new Integer(SqlParserSymbols.KW_SYSTEM)); keywordMap.put("link", new Integer(SqlParserSymbols.KW_LINK)); keywordMap.put("migrate", new Integer(SqlParserSymbols.KW_MIGRATE)); From 92c44bb0c71650a9e544a7d5d7b1228daf8c181b Mon Sep 17 00:00:00 2001 From: morningman Date: Mon, 4 Sep 2017 15:44:48 +0800 Subject: [PATCH 2/2] Update build.sh --- build.sh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/build.sh b/build.sh index f32fc455139848..0a71d23e4120b1 100755 --- a/build.sh +++ b/build.sh @@ -78,9 +78,9 @@ if [ ${CLEAN_ALL} -eq 1 ]; then fi mkdir -p ${PALO_HOME}/be/build/ cd ${PALO_HOME}/be/build/ -#cmake ../ -#make -j${PARALLEL} -#make install +cmake ../ +make -j${PARALLEL} +make install cd ${PALO_HOME} # Build docs, should be built before Frontend