Skip to content
This repository has been archived by the owner on Nov 9, 2020. It is now read-only.

allow user to choose ETCD ports for vFile usage #1988

Merged
merged 4 commits into from
Nov 26, 2017
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
61 changes: 48 additions & 13 deletions client_plugin/drivers/vfile/kvstore/etcdops/etcdops.go
Original file line number Diff line number Diff line change
Expand Up @@ -35,8 +35,8 @@ import (
)

/*
etcdClientPort: port for etcd clients to talk to the peers
etcdPeerPort: port for etcd peers talk to each other
defaultEtcdClientPort: default port for etcd clients to talk to the peers
defaultEtcdPeerPort: default port for etcd peers to talk to each other
etcdClusterToken: ID of the cluster to create/join
etcdListenURL: etcd listening interface
etcdScheme: Protocol used for communication
Expand All @@ -57,8 +57,8 @@ import (
*/
const (
etcdDataDir = "/etcd-data"
etcdClientPort = ":2379"
etcdPeerPort = ":2380"
defaultEtcdClientPort = ":2379"
defaultEtcdPeerPort = ":2380"
etcdClusterToken = "vfile-etcd-cluster"
etcdListenURL = "0.0.0.0"
etcdScheme = "http://"
Expand All @@ -85,6 +85,10 @@ type EtcdKVS struct {
etcdCMD *exec.Cmd
// watcher is used for killing the watch request when node is demoted
watcher *etcdClient.Client
// etcdClientPort is the port for etcd clients to talk to the peers
etcdClientPort string
// etcdPeerPort is the port for etcd peers to talk to each other
etcdPeerPort string
}

// VFileVolConnectivityData - Contains metadata of vFile volumes
Expand All @@ -108,11 +112,15 @@ func NewKvStore(dockerOps *dockerops.DockerOps) *EtcdKVS {
return nil
}

etcdClientPort, etcdPeerPort := getEtcdPorts()

e = &EtcdKVS{
dockerOps: dockerOps,
nodeID: nodeID,
nodeAddr: addr,
isManager: isManager,
dockerOps: dockerOps,
nodeID: nodeID,
nodeAddr: addr,
isManager: isManager,
etcdClientPort: etcdClientPort,
etcdPeerPort: etcdPeerPort,
}

if !isManager {
Expand Down Expand Up @@ -189,10 +197,31 @@ func NewKvStore(dockerOps *dockerops.DockerOps) *EtcdKVS {
return e
}

// getEtcdPorts resolves the etcd client and peer ports used by the vFile
// plugin. Each port may be overridden through the VFILE_ETCD_CLIENT_PORT /
// VFILE_ETCD_PEER_PORT environment variables (set as a bare port number,
// e.g. "4001"); when an override is absent the compiled-in default
// (defaultEtcdClientPort / defaultEtcdPeerPort) is used. Both results are
// returned in ":<port>" form, ready to be appended to a host address.
func getEtcdPorts() (string, string) {
	// resolve returns the ":"-prefixed override from envVar, or fallback
	// (already ":"-prefixed) when the variable is unset or empty.
	resolve := func(envVar, fallback string) string {
		if v := os.Getenv(envVar); v != "" {
			return ":" + v
		}
		return fallback
	}

	clientPort := resolve("VFILE_ETCD_CLIENT_PORT", defaultEtcdClientPort)
	peerPort := resolve("VFILE_ETCD_PEER_PORT", defaultEtcdPeerPort)

	log.Infof("getEtcdPorts: clientPort=%s peerPort=%s", clientPort, peerPort)
	return clientPort, peerPort
}

// rejoinEtcdCluster function is called when a node need to rejoin a ETCD cluster
func (e *EtcdKVS) rejoinEtcdCluster() error {
nodeID := e.nodeID
nodeAddr := e.nodeAddr
etcdClientPort := e.etcdClientPort
etcdPeerPort := e.etcdPeerPort
log.Infof("rejoinEtcdCluster on node with nodeID %s and nodeAddr %s", nodeID, nodeAddr)
lines := []string{
"--name", nodeID,
Expand All @@ -218,6 +247,8 @@ func (e *EtcdKVS) rejoinEtcdCluster() error {
func (e *EtcdKVS) startEtcdCluster() error {
nodeID := e.nodeID
nodeAddr := e.nodeAddr
etcdClientPort := e.etcdClientPort
etcdPeerPort := e.etcdPeerPort
log.Infof("startEtcdCluster on node with nodeID %s and nodeAddr %s", nodeID, nodeAddr)

files, err := filepath.Glob(etcdDataDir)
Expand Down Expand Up @@ -256,6 +287,8 @@ func (e *EtcdKVS) startEtcdCluster() error {
func (e *EtcdKVS) joinEtcdCluster() error {
nodeAddr := e.nodeAddr
nodeID := e.nodeID
etcdClientPort := e.etcdClientPort
etcdPeerPort := e.etcdPeerPort
log.Infof("joinEtcdCluster on node with nodeID %s and nodeAddr %s", nodeID, nodeAddr)

leaderAddr, err := e.dockerOps.GetSwarmLeader()
Expand All @@ -268,7 +301,7 @@ func (e *EtcdKVS) joinEtcdCluster() error {
return err
}

etcd, err := addrToEtcdClient(leaderAddr)
etcd, err := e.addrToEtcdClient(leaderAddr)
if err != nil {
log.WithFields(
log.Fields{"nodeAddr": nodeAddr,
Expand Down Expand Up @@ -390,7 +423,7 @@ func (e *EtcdKVS) joinEtcdCluster() error {

// leaveEtcdCluster function is called when a manager is demoted
func (e *EtcdKVS) leaveEtcdCluster() error {
etcd, err := addrToEtcdClient(e.nodeAddr)
etcd, err := e.addrToEtcdClient(e.nodeAddr)
if err != nil {
log.WithFields(
log.Fields{"nodeAddr": e.nodeAddr,
Expand All @@ -414,6 +447,7 @@ func (e *EtcdKVS) leaveEtcdCluster() error {

// create the peer URL for filtering ETCD member information
// each ETCD member has a unique peer URL
etcdPeerPort := e.etcdPeerPort
peerAddr := etcdScheme + e.nodeAddr + etcdPeerPort
for _, member := range lresp.Members {
// loop all current etcd members to find if there is already a member with the same peerAddr
Expand Down Expand Up @@ -496,7 +530,7 @@ func (e *EtcdKVS) checkLocalEtcd() error {
select {
case <-ticker.C:
log.Infof("Checking ETCD client is started")
cli, err := addrToEtcdClient(e.nodeAddr)
cli, err := e.addrToEtcdClient(e.nodeAddr)
if err != nil {
log.WithFields(
log.Fields{"nodeAddr": e.nodeAddr,
Expand Down Expand Up @@ -874,7 +908,7 @@ func (e *EtcdKVS) createEtcdClient() *etcdClient.Client {
}

for _, manager := range managers {
etcd, err := addrToEtcdClient(manager.Addr)
etcd, err := e.addrToEtcdClient(manager.Addr)
if err == nil {
return etcd
}
Expand All @@ -890,9 +924,10 @@ func (e *EtcdKVS) createEtcdClient() *etcdClient.Client {
// addrToEtcdClient function create a new Etcd client according to the input docker address
// it can be used by swarm worker to get a Etcd client on swarm manager
// or it can be used by swarm manager to get a Etcd client on swarm leader
func addrToEtcdClient(addr string) (*etcdClient.Client, error) {
func (e *EtcdKVS) addrToEtcdClient(addr string) (*etcdClient.Client, error) {
// input address are RemoteManagers from docker info or ManagerStatus.Addr from docker inspect
// in the format of [host]:[docker manager port]
etcdClientPort := e.etcdClientPort
s := strings.Split(addr, ":")
endpoint := s[0] + etcdClientPort
cfg := etcdClient.Config{
Expand Down
5 changes: 5 additions & 0 deletions docs/external/vfile-plugin.md
Original file line number Diff line number Diff line change
Expand Up @@ -34,6 +34,11 @@ docker plugin install --grant-all-permissions --alias vfile vmware/vfile:latest

Note: please make sure the base volume plugin is already installed!

Internally, vFile creates and uses an etcd cluster to store metadata for volumes. By default, the etcd cluster listens on port 2379 for client communication and port 2380 for peer communication. If you have another etcd cluster that already listens on those default ports, you need to specify different ports to avoid conflicts when installing the vFile plugin. Please see the following example:
```
docker plugin install --grant-all-permissions --alias vfile vmware/vfile:latest VFILE_TIMEOUT_IN_SECOND=90 VFILE_ETCD_CLIENT_PORT=4001 VFILE_ETCD_PEER_PORT=4002
```

* The `VFILE_TIMEOUT_IN_SECOND` setting is strongly recommended before [Issue #1954](https://github.com/vmware/docker-volume-vsphere/issues/1954) is resolved.

## Remove and Reinstallation
Expand Down
12 changes: 12 additions & 0 deletions plugin_dockerbuild/config.json-template
Original file line number Diff line number Diff line change
Expand Up @@ -76,6 +76,18 @@
"description": "Group ID of the socket file for the plugin",
"value": "root",
"Settable": [ "value"]
},
{
"name": "VFILE_ETCD_CLIENT_PORT",
"description": "Etcd client port number used by vFILE plugin",
"value": "",
"Settable": [ "value"]
},
{
"name": "VFILE_ETCD_PEER_PORT",
"description": "Etcd peer port number used by vFILE plugin",
"value": "",
"Settable": [ "value"]
}

]
Expand Down