add new stmt ALTER SYSTEM ADD FREE BACKEND to add a backend not belon… #67

Merged 2 commits on Sep 4, 2017
15 changes: 10 additions & 5 deletions docs/help/Contents/Administration/admin_stmt.md
@@ -1,10 +1,12 @@
# ALTER SYSTEM
## description

This statement is used to operate on the nodes within a cluster. (Administrators only!)
This statement is used to operate on the nodes within a system. (Administrators only!)
Syntax:
1) Add a backend node
ALTER SYSTEM ADD BACKEND "host:heartbeat_port"[,"host:heartbeat_port"...];
2) Add a free backend node
ALTER SYSTEM ADD FREE BACKEND "host:heartbeat_port"[,"host:heartbeat_port"...];
2) Drop a backend node
ALTER SYSTEM DROP BACKEND "host:heartbeat_port"[,"host:heartbeat_port"...];
3) Decommission a backend node
@@ -27,18 +29,21 @@

1. Add a backend node
ALTER SYSTEM ADD BACKEND "host:port";

2. Add a free backend node
ALTER SYSTEM ADD FREE BACKEND "host:port";

2. Drop two backend nodes
3. Drop two backend nodes
ALTER SYSTEM DROP BACKEND "host1:port", "host2:port";

3. Decommission two backend nodes
4. Decommission two backend nodes
ALTER SYSTEM DECOMMISSION BACKEND "host1:port", "host2:port";

4. Add two HDFS Brokers
5. Add two HDFS Brokers
ALTER SYSTEM ADD BROKER hdfs "host1:port", "host2:port";

## keyword
ALTER,SYSTEM,BACKEND,BROKER
ALTER,SYSTEM,BACKEND,BROKER,FREE

# CANCEL ALTER SYSTEM
## description
9 changes: 9 additions & 0 deletions docs/help/Contents/Data Manipulation/manipulation_stmt.md
@@ -762,3 +762,12 @@

## keyword
SHOW, RESTORE

# SHOW BACKENDS
## description
This statement is used to view the nodes within a cluster
Syntax:
SHOW BACKENDS

## keyword
SHOW, BACKENDS
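
For context on how the documented statements are exercised, the FE speaks the MySQL protocol, so the new syntax can be issued from any MySQL client or driver. The sketch below is a minimal, hypothetical JDBC session; the host, ports, and credentials are placeholders and are not taken from this PR.

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class AddFreeBackendExample {
    public static void main(String[] args) throws Exception {
        // Placeholder FE address and credentials; 9030 is a commonly used FE query
        // port and 9050 a commonly used BE heartbeat port, but verify your deployment.
        String url = "jdbc:mysql://fe_host:9030/";
        try (Connection conn = DriverManager.getConnection(url, "root", "");
             Statement stmt = conn.createStatement()) {

            // Add a backend in free state, i.e. not owned by any cluster.
            stmt.execute("ALTER SYSTEM ADD FREE BACKEND \"be_host:9050\"");

            // SHOW BACKENDS lists the nodes, including the one just added.
            try (ResultSet rs = stmt.executeQuery("SHOW BACKENDS")) {
                while (rs.next()) {
                    System.out.println(rs.getString(1));
                }
            }
        }
    }
}
```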
2 changes: 1 addition & 1 deletion fe/src/com/baidu/palo/alter/SystemHandler.java
@@ -145,7 +145,7 @@ public synchronized void process(List<AlterClause> alterClauses, String clusterN

if (alterClause instanceof AddBackendClause) {
AddBackendClause addBackendClause = (AddBackendClause) alterClause;
Catalog.getCurrentSystemInfo().addBackends(addBackendClause.getHostPortPairs());
Catalog.getCurrentSystemInfo().addBackends(addBackendClause.getHostPortPairs(), addBackendClause.isFree());
} else if (alterClause instanceof DropBackendClause) {
DropBackendClause dropBackendClause = (DropBackendClause) alterClause;
if (!dropBackendClause.isForce()) {
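
The handler change above simply forwards the new isFree flag into SystemInfoService.addBackends; the cluster-ownership logic itself is not part of this diff. The standalone sketch below only models the intended distinction, namely that a free backend is registered without an owner cluster while a normal one joins default_cluster, and every name in it is illustrative rather than taken from the Palo code base.

```java
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

// Toy model of the behavior the isFree flag selects; not the real SystemInfoService.
public class FreeBackendModel {

    static class Backend {
        final String host;
        final int heartbeatPort;
        String ownerCluster; // null means "free": not owned by any cluster

        Backend(String host, int heartbeatPort) {
            this.host = host;
            this.heartbeatPort = heartbeatPort;
        }
    }

    // Mirrors the shape of addBackends(hostPortPairs, isFree).
    static List<Backend> addBackends(List<String> hostPorts, boolean isFree) {
        List<Backend> added = new ArrayList<>();
        for (String hostPort : hostPorts) {
            String[] parts = hostPort.split(":");
            Backend be = new Backend(parts[0], Integer.parseInt(parts[1]));
            if (!isFree) {
                // A plain ADD BACKEND is assigned to the default cluster.
                be.ownerCluster = "default_cluster";
            }
            added.add(be);
        }
        return added;
    }

    public static void main(String[] args) {
        for (Backend be : addBackends(Arrays.asList("host1:9050", "host2:9050"), true)) {
            System.out.println(be.host + " owner=" + be.ownerCluster); // owner=null, i.e. free
        }
    }
}
```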
15 changes: 14 additions & 1 deletion fe/src/com/baidu/palo/analysis/AddBackendClause.java
@@ -19,8 +19,12 @@

public class AddBackendClause extends BackendClause {

public AddBackendClause(List<String> hostPorts) {
// a BE in free state is not owned by any cluster
protected boolean isFree;

public AddBackendClause(List<String> hostPorts, boolean isFree) {
super(hostPorts);
this.isFree = isFree;
}

@Override
@@ -35,4 +39,13 @@ public String toSql() {
}
return sb.toString();
}


public void setFree(boolean isFree) {
this.isFree = isFree;
}

public boolean isFree() {
return this.isFree;
}
}
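
With the constructor and accessors above, the grammar can build the clause with the flag set for ADD FREE BACKEND and cleared for a plain ADD BACKEND. Below is a small illustrative check that uses only the members shown in this diff; the driver class itself is hypothetical and the parser wiring is not reproduced here.

```java
import com.baidu.palo.analysis.AddBackendClause;

import java.util.Arrays;
import java.util.List;

public class AddBackendClauseCheck {
    public static void main(String[] args) {
        List<String> hostPorts = Arrays.asList("host1:9050", "host2:9050");

        // What the parser would build for: ALTER SYSTEM ADD FREE BACKEND "host1:9050", "host2:9050"
        AddBackendClause freeClause = new AddBackendClause(hostPorts, true);

        // What the parser would build for: ALTER SYSTEM ADD BACKEND "host1:9050", "host2:9050"
        AddBackendClause normalClause = new AddBackendClause(hostPorts, false);

        System.out.println(freeClause.isFree());   // true
        System.out.println(normalClause.isFree()); // false
    }
}
```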
63 changes: 33 additions & 30 deletions fe/src/com/baidu/palo/catalog/Catalog.java
@@ -4422,14 +4422,6 @@ public void createCluster(CreateClusterStmt stmt) throws DdlException {
}

private void unprotectCreateCluster(Cluster cluster) {
if (cluster.getName().equalsIgnoreCase(SystemInfoService.DEFAULT_CLUSTER)) {
if (cluster.getBackendIdList().isEmpty()) {
isDefaultClusterCreated = true;
// ignore default_cluster
return;
}
}

final Iterator<Long> iterator = cluster.getBackendIdList().iterator();
while (iterator.hasNext()) {
final Long id = iterator.next();
@@ -4921,12 +4913,24 @@ public Set<BaseParam> getMigrations() {

public long loadCluster(DataInputStream dis, long checksum) throws IOException, DdlException {
if (Catalog.getCurrentCatalogJournalVersion() >= FeMetaVersion.VERSION_30) {
int dbCount = dis.readInt();
checksum ^= dbCount;
for (long i = 0; i < dbCount; ++i) {
int clusterCount = dis.readInt();
checksum ^= clusterCount;
for (long i = 0; i < clusterCount; ++i) {
final Cluster cluster = new Cluster();
cluster.readFields(dis);
checksum ^= cluster.getId();

// A BE is put into default_cluster when it is added, so the BEs recorded in
// default_cluster may not be the latest, because the cluster can't be updated since
// loadCluster runs after loadBackend.
List<Long> latestBackendIds = systemInfo.getClusterBackendIds(cluster.getName());
if (cluster.getName().equalsIgnoreCase(SystemInfoService.DEFAULT_CLUSTER)) {
cluster.setBackendIdList(latestBackendIds);
} else {
// The cluster should have the same number of BEs as systemInfo recorded
Preconditions.checkState(latestBackendIds.size() == cluster.getBackendIdList().size());
}

final InfoSchemaDb db = new InfoSchemaDb(cluster.getName());
db.setClusterName(cluster.getName());
idToDb.put(db.getId(), db);
@@ -4951,28 +4955,27 @@ private void initDefaultCluster() {
cluster.setName(SystemInfoService.DEFAULT_CLUSTER);
cluster.setId(id);

if (backendList.size() != 0) {
// make sure one host hold only one backend.
Set<String> beHost = Sets.newHashSet();
for (Backend be : defaultClusterBackends) {
if (beHost.contains(be.getHost())) {
// we can not handle this situation automatically.
LOG.error("found more than one backends in same host: {}", be.getHost());
System.exit(-1);
} else {
beHost.add(be.getHost());
}
// make sure one host hold only one backend.
Set<String> beHost = Sets.newHashSet();
for (Backend be : defaultClusterBackends) {
if (beHost.contains(be.getHost())) {
// we can not handle this situation automatically.
LOG.error("found more than one backends in same host: {}", be.getHost());
System.exit(-1);
} else {
beHost.add(be.getHost());
}
}

// we create default_cluster only if we had existing backends.
// this could only happend when we upgrade Palo from version 2.4 to 3.x.
cluster.setBackendIdList(backendList);
unprotectCreateCluster(cluster);
for (Database db : idToDb.values()) {
db.setClusterName(SystemInfoService.DEFAULT_CLUSTER);
cluster.addDb(db.getFullName(), db.getId());
}
// we create default_cluster for ease of use, because
// most users have no multi-tenant needs.
cluster.setBackendIdList(backendList);
unprotectCreateCluster(cluster);
for (Database db : idToDb.values()) {
db.setClusterName(SystemInfoService.DEFAULT_CLUSTER);
cluster.addDb(db.getFullName(), db.getId());
}

// no matter default_cluster is created or not,
// mark isDefaultClusterCreated as true
isDefaultClusterCreated = true;
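
loadCluster, like the other load methods in Catalog, folds what it reads into a running checksum by XOR-ing the count and the ids, and the caller later compares that value against the checksum stored in the image. The stripped-down, self-contained illustration below shows the read/write checksum pattern; the stream layout here is invented for the example and is not Palo's actual image format.

```java
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class ChecksumPatternDemo {
    public static void main(String[] args) throws IOException {
        // Writer side: write a count and some ids, folding each into the checksum.
        ByteArrayOutputStream bos = new ByteArrayOutputStream();
        DataOutputStream dos = new DataOutputStream(bos);
        long[] ids = {42L, 1001L, 1002L};
        long writeChecksum = 0;
        dos.writeInt(ids.length);
        writeChecksum ^= ids.length;
        for (long id : ids) {
            dos.writeLong(id);
            writeChecksum ^= id;
        }

        // Reader side: replay the same XORs while reading the stream back.
        DataInputStream dis = new DataInputStream(new ByteArrayInputStream(bos.toByteArray()));
        long readChecksum = 0;
        int count = dis.readInt();
        readChecksum ^= count;
        for (int i = 0; i < count; ++i) {
            readChecksum ^= dis.readLong();
        }

        // If the image was read back consistently, both sides agree.
        System.out.println(writeChecksum == readChecksum); // true
    }
}
```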
33 changes: 16 additions & 17 deletions fe/src/com/baidu/palo/cluster/Cluster.java
@@ -51,7 +51,7 @@ public class Cluster implements Writable {
private Long id;
private String name;
// backends which the cluster owns
private List<Long> backendIdList;
private Set<Long> backendIdSet;

private Set<Long> userIdSet;
private Set<String> userNameSet;
@@ -66,7 +66,7 @@ public class Cluster implements Writable {

public Cluster() {
this.rwLock = new ReentrantReadWriteLock(true);
this.backendIdList = new LinkedList<Long>();
this.backendIdSet = Sets.newHashSet();
this.userIdSet = Sets.newHashSet();
this.userNameSet = Sets.newHashSet();
this.linkDbNames = Maps.newHashMap();
Expand All @@ -79,7 +79,7 @@ public Cluster(String name, long id) {
this.name = name;
this.id = id;
this.rwLock = new ReentrantReadWriteLock(true);
this.backendIdList = new LinkedList<Long>();
this.backendIdSet = Sets.newHashSet();
this.userIdSet = Sets.newHashSet();
this.userNameSet = Sets.newHashSet();
this.linkDbNames = Maps.newHashMap();
@@ -230,36 +230,36 @@ public boolean isWriteLockHeldByCurrentThread() {
}

public int getClusterCapacity() {
return backendIdList.size();
return backendIdSet.size();
}

public List<Long> getBackendIdList() {
return Lists.newArrayList(backendIdList);
return Lists.newArrayList(backendIdSet);
}

public void setBackendIdList(List<Long> backendIdList) {
writeLock();
try {
this.backendIdList = backendIdList;
this.backendIdSet = Sets.newHashSet(backendIdList);
} finally {
writeUnlock();
}
}

public void addBackend(long id) {
public void addBackend(long backendId) {
writeLock();
try {
this.backendIdList.add(id);
this.backendIdSet.add(backendId);
} finally {
writeUnlock();
}
}

public void addBackends(List<Long> backends) {
public void addBackends(List<Long> backendIds) {
writeLock();
try {
this.backendIdList.addAll(backends);
} finally {
this.backendIdSet.addAll(backendIds);
} finally {
writeUnlock();
}
}
@@ -285,8 +285,8 @@ public void write(DataOutput out) throws IOException {
out.writeLong(id);
Text.writeString(out, name);

out.writeLong(backendIdList.size());
for (Long id : backendIdList) {
out.writeLong(backendIdSet.size());
for (Long id : backendIdSet) {
out.writeLong(id);
}

@@ -329,9 +329,8 @@ public void readFields(DataInput in) throws IOException {
Long len = in.readLong();
while (len-- > 0) {
Long id = in.readLong();
backendIdList.add(id);
backendIdSet.add(id);
}

int count = in.readInt();
while (count-- > 0) {
dbNames.add(Text.readString(in));
@@ -362,7 +361,7 @@ public void readFields(DataInput in) throws IOException {
public void removeBackend(long removedBackendId) {
writeLock();
try {
backendIdList.remove((Long)removedBackendId);
backendIdSet.remove((Long)removedBackendId);
} finally {
writeUnlock();
}
@@ -371,7 +370,7 @@ public void removeBackend(long removedBackendId) {
public void removeBackends(List<Long> removedBackendIds) {
writeLock();
try {
backendIdList.remove(removedBackendIds);
backendIdSet.remove(removedBackendIds);
} finally {
writeUnlock();
}
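
Switching the backend id container from a List to a Set changes two behaviors worth noting: duplicate ids are collapsed on insert, and bulk removal has to go through removeAll, since remove with a collection argument only looks for the collection object itself as an element. A small standalone demonstration:

```java
import java.util.Arrays;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Set;

public class BackendIdContainerDemo {
    public static void main(String[] args) {
        // With a List, adding the same backend id twice leaves two entries.
        List<Long> asList = new LinkedList<>(Arrays.asList(10001L, 10002L, 10001L));
        System.out.println(asList.size()); // 3

        // With a Set, the duplicate id is collapsed.
        Set<Long> asSet = new HashSet<>(asList);
        System.out.println(asSet.size()); // 2

        // remove(Collection) does nothing useful here, because the collection
        // itself is not an element of the set.
        asSet.remove(Arrays.asList(10001L, 10002L));
        System.out.println(asSet.size()); // still 2

        // removeAll removes every id contained in the given collection.
        asSet.removeAll(Arrays.asList(10001L, 10002L));
        System.out.println(asSet.size()); // 0
    }
}
```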