From 6b3e710c9141a53fbab43077f80052cd9627fd54 Mon Sep 17 00:00:00 2001 From: Sergei Parshev Date: Thu, 5 Sep 2024 12:49:58 -0400 Subject: [PATCH 1/6] Added golangci-lint and multiple fixes for all kind of checks The most important: * Added golangci to github workflow * Bumped version of go to 1.22 to fix for loops vars * Removed cluster logic since dead code and better one in #30 * Fixed unauthorized access to application resource * Fixed not checked user struct type conversion in API * Multiple security and style fixes --- .github/workflows/main.yml | 18 ++ .golangci.yml | 128 ++++++++++++ check.sh | 4 +- cmd/fish/fish.go | 15 +- go.mod | 3 +- go.sum | 2 - lib/cluster/cluster.go | 73 ------- lib/cluster/clusterclient.go | 169 ---------------- lib/cluster/msg/msg.go | 16 -- lib/crypt/crypt.go | 2 +- lib/drivers/aws/config.go | 2 +- lib/drivers/aws/dedicated_pool.go | 18 +- lib/drivers/aws/driver.go | 7 +- lib/drivers/aws/util.go | 34 ++-- lib/drivers/docker/options.go | 2 +- lib/drivers/docker/util.go | 15 +- lib/drivers/image.go | 32 +-- lib/drivers/native/config.go | 34 ++-- lib/drivers/native/driver.go | 2 +- lib/drivers/native/options.go | 4 +- lib/drivers/native/util.go | 10 +- lib/drivers/test/driver.go | 4 +- lib/drivers/test/tasks.go | 6 +- lib/drivers/vmx/config.go | 6 +- lib/drivers/vmx/options.go | 2 +- lib/drivers/vmx/util.go | 16 +- lib/fish/drivers.go | 2 +- lib/fish/fish.go | 24 +-- lib/fish/node.go | 13 +- lib/fish/vote.go | 2 +- lib/openapi/api/api_v1.go | 298 +++++++++++++++++++--------- lib/openapi/cluster/client.go | 135 ------------- lib/openapi/cluster/cluster_v1.go | 126 ------------ lib/openapi/cluster/hub.go | 57 ------ lib/openapi/openapi.go | 10 +- lib/openapi/types/node.go | 4 +- lib/openapi/types/resources.go | 2 +- lib/proxy_ssh/proxy.go | 2 +- lib/util/file_replace_token.go | 3 +- lib/util/file_replace_token_test.go | 18 +- lib/util/file_starts_with_test.go | 6 +- lib/util/human_size.go | 9 +- lib/util/lock.go | 3 +- 
lib/util/metadata_processing.go | 8 +- lib/util/passthrough_monitor.go | 2 +- tests/helper/copy.go | 2 +- tests/helper/fish.go | 72 ++++--- tests/helper/t_mock.go | 1 + 48 files changed, 548 insertions(+), 875 deletions(-) create mode 100644 .golangci.yml delete mode 100644 lib/cluster/cluster.go delete mode 100644 lib/cluster/clusterclient.go delete mode 100644 lib/cluster/msg/msg.go delete mode 100644 lib/openapi/cluster/client.go delete mode 100644 lib/openapi/cluster/cluster_v1.go delete mode 100644 lib/openapi/cluster/hub.go diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 5b21f5a..30ad651 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -46,6 +46,24 @@ jobs: swagger-editor-url: http://localhost/ definition-file: docs/openapi.yaml + GolangCI: + runs-on: ubuntu-latest + name: Code Lint + permissions: + # Required: allow read access to the content for analysis. + contents: read + # Optional: allow write access to checks to allow the action to annotate code in the PR. + checks: write + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-go@v5 + with: + go-version: stable # Linter will use go.mod file to adjust the rules properly + - name: golangci-lint + uses: golangci/golangci-lint-action@v6 + with: + version: v1.60 + Build: runs-on: ubuntu-latest steps: diff --git a/.golangci.yml b/.golangci.yml new file mode 100644 index 0000000..415447a --- /dev/null +++ b/.golangci.yml @@ -0,0 +1,128 @@ +run: + concurrency: 4 + timeout: 10m + tests: false + allow-parallel-runners: true + allow-serial-runners: true + +output: + show-stats: true + +linters: + # Disable all linters. 
+ # Default: false + disable-all: true + # Enable specific linter + # https://golangci-lint.run/usage/linters/#enabled-by-default + enable: + - asasalint + - asciicheck + - bidichk + - bodyclose + - canonicalheader + - containedctx + - contextcheck + - copyloopvar + #- cyclop + - decorder + #- depguard + - dogsled + #- dupl + - dupword + - durationcheck + #- err113 + #- errcheck # Maybe in the future? + - errchkjson + - errname + #- errorlint + - exhaustive + #- exhaustruct + - fatcontext + #- forbidigo + - forcetypeassert + #- funlen + #- gci + - ginkgolinter + - gocheckcompilerdirectives + #- gochecknoglobals + #- gochecknoinits + - gochecksumtype + #- gocognit + #- goconst + #- gocritic + #- gocyclo + #- godot + #- godox + - gofmt + #- gofumpt + - goheader + - goimports + - gomoddirectives + - gomodguard + - goprintffuncname + - gosec + - gosimple + - gosmopolitan + - govet + - grouper + - importas + - inamedparam + - ineffassign + - interfacebloat + - intrange + #- ireturn + #- lll + - loggercheck + #- maintidx + - makezero + - mirror + - misspell + #- mnd + - musttag + #- nakedret + #- nestif + - nilerr + #- nilnil + #- nlreturn + - noctx + - nolintlint + #- nonamedreturns + - nosprintfhostport + - paralleltest + #- perfsprint + #- prealloc + - predeclared + - promlinter + - protogetter + - reassign + #- revive # not supporting snake_case for vars + - rowserrcheck + - sloglint + - spancheck + - sqlclosecheck + - staticcheck + #- stylecheck + - tagalign + #- tagliatelle + - tenv + - testableexamples + - testifylint + - testpackage + - thelper + - tparallel + - typecheck + - unconvert + - unparam + - unused + - usestdlibvars + #- varnamelen + - wastedassign + - whitespace + #- wrapcheck + #- wsl + - zerologlint + +linters-settings: + gosec: + excludes: + - G115 # integer overflow conversion - disabled due to found no proper way to fix those diff --git a/check.sh b/check.sh index 286d8f1..aa83d68 100755 --- a/check.sh +++ b/check.sh @@ -32,9 +32,9 @@ done echo 
echo '---------------------- GoFmt verify ----------------------' echo -reformat=$(gofmt -l . 2>&1) +reformat=$(gofmt -l -s . 2>&1) if [ "${reformat}" ]; then - echo "Please run 'gofmt -w .': \n${reformat}" + echo "Please run 'gofmt -s -w .': \n${reformat}" errors=$((${errors}+$(echo "${reformat}" | wc -l))) fi diff --git a/cmd/fish/fish.go b/cmd/fish/fish.go index 5e3e1fb..c22b87f 100644 --- a/cmd/fish/fish.go +++ b/cmd/fish/fish.go @@ -27,7 +27,6 @@ import ( "gorm.io/gorm/logger" "github.com/adobe/aquarium-fish/lib/build" - "github.com/adobe/aquarium-fish/lib/cluster" "github.com/adobe/aquarium-fish/lib/crypt" "github.com/adobe/aquarium-fish/lib/fish" "github.com/adobe/aquarium-fish/lib/log" @@ -44,7 +43,6 @@ func main() { var proxy_socks_address string var proxy_ssh_address string var node_address string - var cluster_join *[]string var cfg_path string var dir string var cpu_limit string @@ -83,9 +81,6 @@ func main() { if node_address != "" { cfg.NodeAddress = node_address } - if len(*cluster_join) > 0 { - cfg.ClusterJoin = *cluster_join - } if dir != "" { cfg.Directory = dir } @@ -174,14 +169,8 @@ func main() { return err } - log.Info("Fish joining cluster...") - cl, err := cluster.New(fish, cfg.ClusterJoin, ca_path, cert_path, key_path) - if err != nil { - return err - } - log.Info("Fish starting API...") - srv, err := openapi.Init(fish, cl, cfg.APIAddress, ca_path, cert_path, key_path) + srv, err := openapi.Init(fish, cfg.APIAddress, ca_path, cert_path, key_path) if err != nil { return err } @@ -199,7 +188,6 @@ func main() { log.Info("Fish stopping...") - cl.Stop() fish.Close() log.Info("Fish stopped") @@ -213,7 +201,6 @@ func main() { flags.StringVar(&proxy_socks_address, "socks_proxy", "", "address used to expose the SOCKS5 proxy") flags.StringVar(&proxy_ssh_address, "ssh_proxy", "", "address used to expose the SSH proxy") flags.StringVarP(&node_address, "node", "n", "", "node external endpoint to connect to tell the other nodes") - cluster_join = 
flags.StringSliceP("join", "j", nil, "addresses of existing cluster nodes to join, comma separated") flags.StringVarP(&cfg_path, "cfg", "c", "", "yaml configuration file") flags.StringVarP(&dir, "dir", "D", "", "database and other fish files directory") flags.StringVar(&cpu_limit, "cpu", "", "max amount of threads fish node will be able to utilize, default - no limit") diff --git a/go.mod b/go.mod index eba1aa0..04847d7 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/adobe/aquarium-fish -go 1.21.0 +go 1.22.2 require ( github.com/alessio/shellescape v1.4.1 @@ -14,7 +14,6 @@ require ( github.com/ghodss/yaml v1.0.0 github.com/glebarez/sqlite v1.7.0 github.com/google/uuid v1.5.0 - github.com/gorilla/websocket v1.4.0 github.com/hpcloud/tail v1.0.0 github.com/labstack/echo/v4 v4.11.4 github.com/mostlygeek/arp v0.0.0-20170424181311-541a2129847a diff --git a/go.sum b/go.sum index dec50ff..eb32de9 100644 --- a/go.sum +++ b/go.sum @@ -59,8 +59,6 @@ github.com/google/pprof v0.0.0-20221118152302-e6195bd50e26 h1:Xim43kblpZXfIBQsbu github.com/google/pprof v0.0.0-20221118152302-e6195bd50e26/go.mod h1:dDKJzRmX4S37WGHujM7tX//fmj1uioxKzKxz3lo4HJo= github.com/google/uuid v1.5.0 h1:1p67kYwdtXjb0gL0BPiP1Av9wiZPo5A8z2cWkTZ+eyU= github.com/google/uuid v1.5.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gorilla/websocket v1.4.0 h1:WDFjx/TMzVgy9VdMMQi2K2Emtwi2QcUQsztZ/zLaH/Q= -github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= diff --git a/lib/cluster/cluster.go b/lib/cluster/cluster.go deleted file mode 100644 index 16b943d..0000000 --- a/lib/cluster/cluster.go +++ /dev/null @@ -1,73 +0,0 @@ -package cluster - -import ( - "context" - "crypto/tls" - 
"crypto/x509" - "fmt" - "net/url" - "os" - - "github.com/adobe/aquarium-fish/lib/fish" -) - -type Cluster struct { - fish *fish.Fish - - clients []*ClusterClient - - ca_pool *x509.CertPool - certkey tls.Certificate -} - -func New(fish *fish.Fish, join []string, ca_path, cert_path, key_path string) (*Cluster, error) { - c := &Cluster{ - fish: fish, - ca_pool: x509.NewCertPool(), - } - - // Load CA cert to pool - ca_bytes, err := os.ReadFile(ca_path) - if err != nil { - return nil, fmt.Errorf("Cluster: Unable to load CA certificate: %v", err) - } - if !c.ca_pool.AppendCertsFromPEM(ca_bytes) { - return nil, fmt.Errorf("Cluster: Incorrect CA pem data: %s", ca_path) - } - - // Load client cert and key - c.certkey, err = tls.LoadX509KeyPair(cert_path, key_path) - if err != nil { - return nil, fmt.Errorf("Cluster: Unable to load cert/key: %v", err) - } - - // Connect the join nodes - for _, endpoint := range join { - c.NewClient(endpoint, "cluster/v1/connect") - } - - return c, nil -} - -func (c *Cluster) NewClient(host, channel string) *ClusterClient { - conn := &ClusterClient{ - url: url.URL{Scheme: "wss", Host: host, Path: channel}, - send_buf: make(chan []byte, 1), - cluster: c, - } - conn.ctx, conn.ctxCancel = context.WithCancel(context.Background()) - - go conn.listen() - go conn.listenWrite() - go conn.ping() - - c.clients = append(c.clients, conn) - - return conn -} - -func (c *Cluster) Stop() { - for _, conn := range c.clients { - conn.Stop() - } -} diff --git a/lib/cluster/clusterclient.go b/lib/cluster/clusterclient.go deleted file mode 100644 index a198b8e..0000000 --- a/lib/cluster/clusterclient.go +++ /dev/null @@ -1,169 +0,0 @@ -package cluster - -import ( - "context" - "crypto/tls" - "encoding/json" - "fmt" - "net/http" - "net/url" - "sync" - "time" - - "github.com/gorilla/websocket" - - "github.com/adobe/aquarium-fish/lib/log" -) - -// Send pings to peer with this period -const ping_period = 30 * time.Second - -type ClusterClient struct { - url url.URL - 
send_buf chan []byte - ctx context.Context - ctxCancel context.CancelFunc - - mu sync.RWMutex - wsconn *websocket.Conn - - cluster *Cluster -} - -func (conn *ClusterClient) Connect() *websocket.Conn { - conn.mu.Lock() - defer conn.mu.Unlock() - if conn.wsconn != nil { - return conn.wsconn - } - - ticker := time.NewTicker(time.Second) - defer ticker.Stop() - for ; ; <-ticker.C { - select { - case <-conn.ctx.Done(): - return nil - default: - config := &tls.Config{ - RootCAs: conn.cluster.ca_pool, - Certificates: []tls.Certificate{conn.cluster.certkey}, - } - dialer := &websocket.Dialer{ - Proxy: http.ProxyFromEnvironment, - HandshakeTimeout: 45 * time.Second, - TLSClientConfig: config, - EnableCompression: true, - } - ws, _, err := dialer.Dial(conn.url.String(), nil) - if err != nil { - log.Errorf("ClusterClient %s: Cannot connect to websocket: %s: %v", conn.url.Host, conn.url.String(), err) - continue - } - - log.Infof("ClusterClient %s: Connected to node", conn.url.Host) - conn.wsconn = ws - - return conn.wsconn - } - } -} - -func (conn *ClusterClient) listen() { - log.Infof("ClusterClient %s: Listen for the messages", conn.url.Host) - ticker := time.NewTicker(time.Second) - defer ticker.Stop() - for { - select { - case <-conn.ctx.Done(): - return - case <-ticker.C: - for { - ws := conn.Connect() - if ws == nil { - return - } - _, _ /*msg*/, err := ws.ReadMessage() - if err != nil { - log.Errorf("ClusterClient %s: Cannot read websocket message: %v", conn.url.Host, err) - conn.closeWs() - break - } - //log.Printf("ClusterClient %s: Received msg: %x\n", conn.url.Host, msg) - // TODO: Process msg - } - } - } -} - -// Write data to the websocket server -func (conn *ClusterClient) Write(payload any) error { - data, err := json.Marshal(payload) - if err != nil { - return err - } - ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond*50) - defer cancel() - - for { - select { - case conn.send_buf <- data: - return nil - case <-ctx.Done(): - return 
fmt.Errorf("context canceled") - } - } -} - -func (conn *ClusterClient) listenWrite() { - for data := range conn.send_buf { - ws := conn.Connect() - if ws == nil { - log.Errorf("ClusterClient %s: No websocket connection: %v", conn.url.Host, fmt.Errorf("ws is nil")) - continue - } - - if err := ws.WriteMessage( - websocket.TextMessage, - data, - ); err != nil { - log.Errorf("ClusterClient %s: Write error: %v", conn.url.Host, err) - } - } -} - -// Close will send close message and shutdown websocket connection -func (conn *ClusterClient) Stop() { - conn.ctxCancel() - conn.closeWs() -} - -// Close will send close message and shutdown websocket connection -func (conn *ClusterClient) closeWs() { - conn.mu.Lock() - if conn.wsconn != nil { - conn.wsconn.WriteMessage(websocket.CloseMessage, websocket.FormatCloseMessage(websocket.CloseNormalClosure, "")) - conn.wsconn.Close() - conn.wsconn = nil - } - conn.mu.Unlock() -} - -func (conn *ClusterClient) ping() { - log.Infof("ClusterClient %s: Ping started", conn.url.Host) - ticker := time.NewTicker(ping_period) - defer ticker.Stop() - for { - select { - case <-ticker.C: - ws := conn.Connect() - if ws == nil { - continue - } - if err := conn.wsconn.WriteControl(websocket.PingMessage, []byte{}, time.Now().Add(ping_period/2)); err != nil { - conn.closeWs() - } - case <-conn.ctx.Done(): - return - } - } -} diff --git a/lib/cluster/msg/msg.go b/lib/cluster/msg/msg.go deleted file mode 100644 index f7dc46c..0000000 --- a/lib/cluster/msg/msg.go +++ /dev/null @@ -1,16 +0,0 @@ -package msg - -type Nodes struct { - Type string `json:"type"` - //Data []*Node `json:"data"` -} - -func NewNodes() *Nodes { - return &Nodes{ - Type: "nodes", - } -} - -/*func (m *Nodes) AddNode(node *Node) { - m.Data = append(m.Data, node) -}*/ diff --git a/lib/crypt/crypt.go b/lib/crypt/crypt.go index ce89461..86cd343 100644 --- a/lib/crypt/crypt.go +++ b/lib/crypt/crypt.go @@ -112,7 +112,7 @@ func (h *Hash) IsEqual(input string) bool { h.Prop.Threads = 
v074_Argon2_Threads } - return bytes.Compare(h.Hash, argon2.IDKey([]byte(input), h.Salt, h.Prop.Iterations, h.Prop.Memory, h.Prop.Threads, uint32(len(h.Hash)))) == 0 + return bytes.Equal(h.Hash, argon2.IDKey([]byte(input), h.Salt, h.Prop.Iterations, h.Prop.Memory, h.Prop.Threads, uint32(len(h.Hash)))) } func (hash *Hash) IsEmpty() bool { diff --git a/lib/drivers/aws/config.go b/lib/drivers/aws/config.go index 65e7d59..ed3423e 100644 --- a/lib/drivers/aws/config.go +++ b/lib/drivers/aws/config.go @@ -126,7 +126,7 @@ func (c *Config) Validate() (err error) { // It helps with the machines where internet is not available right away retries := 6 counter := 0 - account := "" + var account string for { res, err := conn.GetCallerIdentity(context.TODO(), input) counter++ diff --git a/lib/drivers/aws/dedicated_pool.go b/lib/drivers/aws/dedicated_pool.go index dddd558..9762391 100644 --- a/lib/drivers/aws/dedicated_pool.go +++ b/lib/drivers/aws/dedicated_pool.go @@ -128,8 +128,8 @@ func (w *dedicatedPoolWorker) ReserveHost(instance_type string) string { } // Pick random one from the list of available hosts to reduce the possibility of conflict - host := w.active_hosts[available_hosts[rand.Intn(len(available_hosts))]] - // Mark it as reserved temporarly to ease multi-allocation at the same time + host := w.active_hosts[available_hosts[rand.Intn(len(available_hosts))]] // #nosec G404 + // Mark it as reserved temporary to ease multi-allocation at the same time host.State = HOST_RESERVED w.active_hosts[aws.ToString(host.HostId)] = host return aws.ToString(host.HostId) @@ -271,7 +271,7 @@ func (w *dedicatedPoolWorker) manageHosts() []string { // Skipping the hosts that already in managed list found := false - for hid, _ := range w.to_manage_at { + for hid := range w.to_manage_at { if host_id == hid { found = true break @@ -315,7 +315,7 @@ func (w *dedicatedPoolWorker) releaseHosts(release_hosts []string) { if host, ok := w.active_hosts[host_id]; ok && host.HostProperties != nil { 
if isHostMac(&host) { mac_hosts = append(mac_hosts, host_id) - // If mac host not reached 24h since allocation - skipping addtion to the release list + // If mac host not reached 24h since allocation - skipping addition to the release list if !isHostReadyForRelease(&host) { continue } @@ -398,7 +398,7 @@ func isHostReadyForRelease(host *ec2_types.Host) bool { // Check if the host is used func isHostUsed(host *ec2_types.Host) bool { - if host.State == HOST_RESERVED || host.Instances != nil && len(host.Instances) > 0 { + if host.State == HOST_RESERVED || len(host.Instances) > 0 { return true } return false @@ -456,7 +456,7 @@ func (w *dedicatedPoolWorker) updateDedicatedHosts() error { p := ec2.NewDescribeHostsPaginator(conn, &ec2.DescribeHostsInput{ Filter: []ec2_types.Filter{ // We don't need released hosts, so skipping them - ec2_types.Filter{ + { Name: aws.String("state"), Values: []string{ string(ec2_types.AllocationStateAvailable), @@ -465,15 +465,15 @@ func (w *dedicatedPoolWorker) updateDedicatedHosts() error { string(ec2_types.AllocationStatePending), }, }, - ec2_types.Filter{ + { Name: aws.String("availability-zone"), Values: []string{w.record.Zone}, }, - ec2_types.Filter{ + { Name: aws.String("instance-type"), Values: []string{w.record.Type}, }, - ec2_types.Filter{ + { Name: aws.String("tag-key"), Values: []string{"AquariumDedicatedPool-" + w.name}, }, diff --git a/lib/drivers/aws/driver.go b/lib/drivers/aws/driver.go index 2392dd2..195f172 100644 --- a/lib/drivers/aws/driver.go +++ b/lib/drivers/aws/driver.go @@ -151,11 +151,11 @@ func (d *Driver) AvailableCapacity(node_usage types.Resources, def types.LabelDe // Quotas for hosts are: "Running Dedicated mac1 Hosts" & "Running Dedicated mac2 Hosts" p := ec2.NewDescribeHostsPaginator(conn_ec2, &ec2.DescribeHostsInput{ Filter: []ec2_types.Filter{ - ec2_types.Filter{ + { Name: aws.String("instance-type"), Values: []string{opts.InstanceType}, }, - ec2_types.Filter{ + { Name: aws.String("state"), Values: 
[]string{"available"}, }, @@ -315,7 +315,6 @@ func (d *Driver) Allocate(def types.LabelDefinition, metadata map[string]any) (* } else { return nil, fmt.Errorf("AWS: %s: Unable to locate the dedicated pool: %s", i_name, opts.Pool) } - } else if awsInstTypeAny(opts.InstanceType, "mac") { // For mac machines only dedicated hosts are working, so set the tenancy input.Placement = &ec2_types.Placement{ @@ -329,7 +328,7 @@ func (d *Driver) Allocate(def types.LabelDefinition, metadata map[string]any) (* if err != nil { return nil, fmt.Errorf("AWS: %s: Unable to serialize metadata to userdata: %v", i_name, err) } - input.UserData = aws.String(base64.StdEncoding.EncodeToString([]byte(userdata))) + input.UserData = aws.String(base64.StdEncoding.EncodeToString(userdata)) } if opts.SecurityGroup != "" { diff --git a/lib/drivers/aws/util.go b/lib/drivers/aws/util.go index 6fe9355..e366662 100644 --- a/lib/drivers/aws/util.go +++ b/lib/drivers/aws/util.go @@ -98,7 +98,7 @@ func (d *Driver) getSubnetId(conn *ec2.Client, id_tag string) (string, int64, er req := ec2.DescribeVpcsInput{ Filters: []types.Filter{ filter, - types.Filter{ + { Name: aws.String("owner-id"), Values: d.cfg.AccountIDs, }, @@ -110,7 +110,7 @@ func (d *Driver) getSubnetId(conn *ec2.Client, id_tag string) (string, int64, er req := ec2.DescribeSubnetsInput{ Filters: []types.Filter{ filter, - types.Filter{ + { Name: aws.String("owner-id"), Values: d.cfg.AccountIDs, }, @@ -218,11 +218,11 @@ func (d *Driver) getImageId(conn *ec2.Client, id_name string) (string, error) { // Look for image with the defined name req := ec2.DescribeImagesInput{ Filters: []types.Filter{ - types.Filter{ + { Name: aws.String("name"), Values: []string{id_name}, }, - types.Filter{ + { Name: aws.String("state"), Values: []string{"available"}, }, @@ -321,23 +321,23 @@ func (d *Driver) getImageIdByType(conn *ec2.Client, instance_type string) (strin log.Debugf("AWS: Looking an image: Checking past year from %d", images_till.Year()) req := 
ec2.DescribeImagesInput{ Filters: []types.Filter{ - types.Filter{ + { Name: aws.String("architecture"), Values: []string{string(type_arch)}, }, - types.Filter{ + { Name: aws.String("creation-date"), Values: awsLastYearFilterValues(images_till), }, - types.Filter{ + { Name: aws.String("is-public"), Values: []string{"true"}, }, - types.Filter{ + { Name: aws.String("owner-alias"), Values: []string{"amazon"}, // Use only amazon-provided images }, - types.Filter{ + { Name: aws.String("state"), Values: []string{"available"}, }, @@ -377,11 +377,11 @@ func (d *Driver) getSecGroupId(conn *ec2.Client, id_name string) (string, error) // Look for security group with the defined name req := ec2.DescribeSecurityGroupsInput{ Filters: []types.Filter{ - types.Filter{ + { Name: aws.String("group-name"), Values: []string{id_name}, }, - types.Filter{ + { Name: aws.String("owner-id"), Values: d.cfg.AccountIDs, }, @@ -414,11 +414,11 @@ func (d *Driver) getSnapshotId(conn *ec2.Client, id_tag string) (string, error) // Look for VPC with the defined tag over pages req := ec2.DescribeSnapshotsInput{ Filters: []types.Filter{ - types.Filter{ + { Name: aws.String("tag:" + tag_key_val[0]), Values: []string{tag_key_val[1]}, }, - types.Filter{ + { Name: aws.String("status"), Values: []string{"completed"}, }, @@ -460,7 +460,7 @@ func (d *Driver) getProjectCpuUsage(conn *ec2.Client, inst_types []string) (int6 // checking if the instance is actually starts with type+number. 
req := ec2.DescribeInstancesInput{ Filters: []types.Filter{ - types.Filter{ + { Name: aws.String("instance-state-name"), // Confirmed by AWS eng: only terminated instances are not counting in utilization Values: []string{"pending", "running", "shutting-down", "stopping", "stopped"}, @@ -491,7 +491,7 @@ func (d *Driver) getProjectCpuUsage(conn *ec2.Client, inst_types []string) (int6 func (d *Driver) getInstance(conn *ec2.Client, inst_id string) (*types.Instance, error) { input := ec2.DescribeInstancesInput{ Filters: []types.Filter{ - types.Filter{ + { Name: aws.String("instance-id"), Values: []string{inst_id}, }, @@ -503,7 +503,7 @@ func (d *Driver) getInstance(conn *ec2.Client, inst_id string) (*types.Instance, return nil, err } if len(resp.Reservations) < 1 || len(resp.Reservations[0].Instances) < 1 { - return nil, nil + return nil, fmt.Errorf("Returned empty reservations or instances lists") } return &resp.Reservations[0].Instances[0], nil } @@ -609,7 +609,7 @@ func (d *Driver) triggerHostScrubbing(host_id, instance_type string) (err error) conn := d.newEC2Conn() // Just need an image, which we could find by looking at the host instance type - vm_image := "" + var vm_image string if vm_image, err = d.getImageIdByType(conn, instance_type); err != nil { return fmt.Errorf("AWS: scrubbing %s: Unable to find image: %v", host_id, err) } diff --git a/lib/drivers/docker/options.go b/lib/drivers/docker/options.go index 7ee895b..d378417 100644 --- a/lib/drivers/docker/options.go +++ b/lib/drivers/docker/options.go @@ -45,7 +45,7 @@ func (o *Options) Apply(options util.UnparsedJson) error { func (o *Options) Validate() error { // Check images var img_err error - for index, _ := range o.Images { + for index := range o.Images { if err := o.Images[index].Validate(); err != nil { img_err = log.Error("Docker: Error during image validation:", err) } diff --git a/lib/drivers/docker/util.go b/lib/drivers/docker/util.go index 9bdc7d8..47054b5 100644 --- a/lib/drivers/docker/util.go 
+++ b/lib/drivers/docker/util.go @@ -125,7 +125,7 @@ func (d *Driver) getAvailResources() (avail_cpu, avail_ram uint) { return } -// Returns the standartized container name +// Returns the standardized container name func (d *Driver) getContainerName(hwaddr string) string { return fmt.Sprintf("fish-%s", strings.ReplaceAll(hwaddr, ":", "")) } @@ -138,7 +138,7 @@ func (d *Driver) loadImages(opts *Options) (string, error) { log.Info("Docker: Loading the required image:", image.Name, image.Version, image.Url) // Running the background routine to download, unpack and process the image - // Success will be checked later by existance of the image in local docker registry + // Success will be checked later by existence of the image in local docker registry wg.Add(1) go func(image drivers.Image) { defer wg.Done() @@ -313,7 +313,7 @@ func (d *Driver) disksCreate(c_name string, run_args *[]string, disks map[string // Do not recreate the disk if it is exists if _, err := os.Stat(dmg_path); os.IsNotExist(err) { - disk_type := "" + var disk_type string switch disk.Type { case "hfs+": disk_type = "HFS+" @@ -370,7 +370,7 @@ func (d *Driver) envCreate(c_name string, metadata map[string]any) (string, erro if err := os.MkdirAll(filepath.Dir(env_file_path), 0o755); err != nil { return "", log.Error("Docker: Unable to create the container directory:", filepath.Dir(env_file_path), err) } - fd, err := os.OpenFile(env_file_path, os.O_WRONLY|os.O_CREATE, 0640) + fd, err := os.OpenFile(env_file_path, os.O_WRONLY|os.O_CREATE, 0o640) if err != nil { return "", log.Error("Docker: Unable to create env file:", env_file_path, err) } @@ -378,7 +378,8 @@ func (d *Driver) envCreate(c_name string, metadata map[string]any) (string, erro // Write env file line by line for key, value := range metadata { - if _, err := fd.Write([]byte(fmt.Sprintf("%s=%s\n", key, value))); err != nil { + data := []byte(fmt.Sprintf("%s=%s\n", key, value)) + if _, err := fd.Write(data); err != nil { return "", 
log.Error("Docker: Unable to write env file data:", env_file_path, err) } } @@ -424,8 +425,8 @@ func runAndLog(timeout time.Duration, path string, arg ...string) (string, strin } // Replace these for Windows, we only want to deal with Unix style line endings. - returnStdout := strings.Replace(stdout.String(), "\r\n", "\n", -1) - returnStderr := strings.Replace(stderr.String(), "\r\n", "\n", -1) + returnStdout := strings.ReplaceAll(stdout.String(), "\r\n", "\n") + returnStderr := strings.ReplaceAll(stderr.String(), "\r\n", "\n") return returnStdout, returnStderr, err } diff --git a/lib/drivers/image.go b/lib/drivers/image.go index 2ba1528..a3749ce 100644 --- a/lib/drivers/image.go +++ b/lib/drivers/image.go @@ -14,8 +14,9 @@ package drivers import ( "archive/tar" - "crypto/md5" - "crypto/sha1" + "context" + "crypto/md5" // #nosec G501 + "crypto/sha1" // #nosec G505 "crypto/sha256" "crypto/sha512" "encoding/hex" @@ -61,7 +62,7 @@ func (i *Image) Validate() error { i.Name = path.Base(i.Url) minus_loc := strings.LastIndexByte(i.Name, '-') if minus_loc != -1 { - // Use the part from beginnig to last minus ('-') - useful to separate version part + // Use the part from beginning to last minus ('-') - useful to separate version part i.Name = i.Name[0:minus_loc] } else if strings.LastIndexByte(i.Name, '.') != -1 { // Split by extension - need to take into account dual extension of tar archives (ex. 
".tar.xz") @@ -139,7 +140,7 @@ func (i *Image) DownloadUnpack(out_dir, user, password string) error { defer os.Remove(lock_path) client := &http.Client{} - req, _ := http.NewRequest("GET", i.Url, nil) + req, _ := http.NewRequestWithContext(context.TODO(), http.MethodGet, i.Url, nil) if user != "" && password != "" { req.SetBasicAuth(user, password) } @@ -150,7 +151,7 @@ func (i *Image) DownloadUnpack(out_dir, user, password string) error { } defer resp.Body.Close() - if resp.StatusCode != 200 { + if resp.StatusCode != http.StatusOK { os.RemoveAll(img_path) return fmt.Errorf("Image: Unable to download file %q: %s", i.Url, resp.Status) } @@ -174,9 +175,9 @@ func (i *Image) DownloadUnpack(out_dir, user, password string) error { // Calculating checksum during reading from the body switch algo_sum[0] { case "md5": - hasher = md5.New() + hasher = md5.New() // #nosec G401 case "sha1": - hasher = sha1.New() + hasher = sha1.New() // #nosec G401 case "sha256": hasher = sha256.New() case "sha512": @@ -191,12 +192,12 @@ func (i *Image) DownloadUnpack(out_dir, user, password string) error { // Check if headers contains the needed algo:hash for quick validation // We're not completely trust the server, but if it returns the wrong sum - we're dropping. 
// Header should look like: X-Checksum-Md5 X-Checksum-Sha1 X-Checksum-Sha256 (Artifactory) - if remote_sum := resp.Header.Get("X-Checksum-" + strings.Title(algo_sum[0])); remote_sum != "" { + if remote_sum := resp.Header.Get("X-Checksum-" + strings.Title(algo_sum[0])); remote_sum != "" { //nolint:staticcheck // SA1019 Strictly ASCII here // Server returned mathing header, so compare it's value to our checksum if remote_sum != algo_sum[1] { os.RemoveAll(img_path) return fmt.Errorf("Image: The remote checksum (from header X-Checksum-%s) doesn't equal the desired one: %q != %q for %q", - strings.Title(algo_sum[0]), remote_sum, algo_sum[1], i.Url) + strings.Title(algo_sum[0]), remote_sum, algo_sum[1], i.Url) //nolint:staticcheck // SA1019 Strictly ASCII here } } } @@ -229,7 +230,7 @@ func (i *Image) DownloadUnpack(out_dir, user, password string) error { return fmt.Errorf("Image: The archive filepath contains '..' which is security forbidden: %q", hdr.Name) } - target := filepath.Join(img_path, hdr.Name) + target := filepath.Join(img_path, hdr.Name) // #nosec G305 , checked above switch hdr.Typeflag { case tar.TypeDir: @@ -239,7 +240,7 @@ func (i *Image) DownloadUnpack(out_dir, user, password string) error { os.RemoveAll(img_path) return fmt.Errorf("Image: Unable to create directory %q: %v", target, err) } - case tar.TypeReg, tar.TypeRegA: + case tar.TypeReg: // Write a file log.Debugf("Util: Extracting '%s': %s", img_path, hdr.Name) err = os.MkdirAll(filepath.Dir(target), 0750) @@ -255,8 +256,13 @@ func (i *Image) DownloadUnpack(out_dir, user, password string) error { defer w.Close() // TODO: Add in-stream sha256 calculation for each file to verify against .sha256 data - _, err = io.Copy(w, tr) - if err != nil { + for { + _, err = io.CopyN(w, tr, 8196) + if err == nil { + continue + } else if err == io.EOF { + break + } os.RemoveAll(img_path) return fmt.Errorf("Image: Unable to unpack content to file %q: %v", target, err) } diff --git a/lib/drivers/native/config.go 
b/lib/drivers/native/config.go index 6aab020..11da509 100644 --- a/lib/drivers/native/config.go +++ b/lib/drivers/native/config.go @@ -113,7 +113,7 @@ func (c *Config) Validate() (err error) { if info, err := os.Stat(c.SudoPath); os.IsNotExist(err) { return fmt.Errorf("Native: Unable to locate `sudo` path: %s, %s", c.SudoPath, err) } else { - if info.Mode()&0111 == 0 { + if info.Mode()&0o111 == 0 { return fmt.Errorf("Native: `sudo` binary is not executable: %s", c.SudoPath) } } @@ -128,7 +128,7 @@ func (c *Config) Validate() (err error) { if info, err := os.Stat(c.SuPath); os.IsNotExist(err) { return fmt.Errorf("Native: Unable to locate `su` path: %s, %s", c.SuPath, err) } else { - if info.Mode()&0111 == 0 { + if info.Mode()&0o111 == 0 { return fmt.Errorf("Native: `su` binary is not executable: %s", c.SuPath) } } @@ -143,7 +143,7 @@ func (c *Config) Validate() (err error) { if info, err := os.Stat(c.ShPath); os.IsNotExist(err) { return fmt.Errorf("Native: Unable to locate `sh` path: %s, %s", c.ShPath, err) } else { - if info.Mode()&0111 == 0 { + if info.Mode()&0o111 == 0 { return fmt.Errorf("Native: `sh` binary is not executable: %s", c.ShPath) } } @@ -157,7 +157,7 @@ func (c *Config) Validate() (err error) { if info, err := os.Stat(c.TarPath); os.IsNotExist(err) { return fmt.Errorf("Native: Unable to locate `tar` path: %s, %s", c.TarPath, err) } else { - if info.Mode()&0111 == 0 { + if info.Mode()&0o111 == 0 { return fmt.Errorf("Native: `tar` binary is not executable: %s", c.TarPath) } } @@ -171,7 +171,7 @@ func (c *Config) Validate() (err error) { if info, err := os.Stat(c.MountPath); os.IsNotExist(err) { return fmt.Errorf("Native: Unable to locate `mount` path: %s, %s", c.MountPath, err) } else { - if info.Mode()&0111 == 0 { + if info.Mode()&0o111 == 0 { return fmt.Errorf("Native: `mount` binary is not executable: %s", c.MountPath) } } @@ -185,7 +185,7 @@ func (c *Config) Validate() (err error) { if info, err := os.Stat(c.ChownPath); os.IsNotExist(err) { return 
fmt.Errorf("Native: Unable to locate `chown` path: %s, %s", c.ChownPath, err) } else { - if info.Mode()&0111 == 0 { + if info.Mode()&0o111 == 0 { return fmt.Errorf("Native: `chown` binary is not executable: %s", c.ChownPath) } } @@ -199,7 +199,7 @@ func (c *Config) Validate() (err error) { if info, err := os.Stat(c.ChmodPath); os.IsNotExist(err) { return fmt.Errorf("Native: Unable to locate `chmod` path: %s, %s", c.ChmodPath, err) } else { - if info.Mode()&0111 == 0 { + if info.Mode()&0o111 == 0 { return fmt.Errorf("Native: `chmod` binary is not executable: %s", c.ChmodPath) } } @@ -213,7 +213,7 @@ func (c *Config) Validate() (err error) { if info, err := os.Stat(c.KillallPath); os.IsNotExist(err) { return fmt.Errorf("Native: Unable to locate `killall` path: %s, %s", c.KillallPath, err) } else { - if info.Mode()&0111 == 0 { + if info.Mode()&0o111 == 0 { return fmt.Errorf("Native: `killall` binary is not executable: %s", c.KillallPath) } } @@ -227,7 +227,7 @@ func (c *Config) Validate() (err error) { if info, err := os.Stat(c.RmPath); os.IsNotExist(err) { return fmt.Errorf("Native: Unable to locate `rm` path: %s, %s", c.RmPath, err) } else { - if info.Mode()&0111 == 0 { + if info.Mode()&0o111 == 0 { return fmt.Errorf("Native: `rm` binary is not executable: %s", c.RmPath) } } @@ -243,7 +243,7 @@ func (c *Config) Validate() (err error) { if info, err := os.Stat(c.DsclPath); os.IsNotExist(err) { return fmt.Errorf("Native: Unable to locate macos `dscl` path: %s, %s", c.DsclPath, err) } else { - if info.Mode()&0111 == 0 { + if info.Mode()&0o111 == 0 { return fmt.Errorf("Native: macos `dscl` binary is not executable: %s", c.DsclPath) } } @@ -257,7 +257,7 @@ func (c *Config) Validate() (err error) { if info, err := os.Stat(c.HdiutilPath); os.IsNotExist(err) { return fmt.Errorf("Native: Unable to locate macos `hdiutil` path: %s, %s", c.HdiutilPath, err) } else { - if info.Mode()&0111 == 0 { + if info.Mode()&0o111 == 0 { return fmt.Errorf("Native: macos `hdiutil` binary is 
not executable: %s", c.HdiutilPath) } } @@ -271,7 +271,7 @@ func (c *Config) Validate() (err error) { if info, err := os.Stat(c.MdutilPath); os.IsNotExist(err) { return fmt.Errorf("Native: Unable to locate macos `mdutil` path: %s, %s", c.MdutilPath, err) } else { - if info.Mode()&0111 == 0 { + if info.Mode()&0o111 == 0 { return fmt.Errorf("Native: macos `mdutil` binary is not executable: %s", c.MdutilPath) } } @@ -285,7 +285,7 @@ func (c *Config) Validate() (err error) { if info, err := os.Stat(c.CreatehomedirPath); os.IsNotExist(err) { return fmt.Errorf("Native: Unable to locate macos `createhomedir` path: %s, %s", c.CreatehomedirPath, err) } else { - if info.Mode()&0111 == 0 { + if info.Mode()&0o111 == 0 { return fmt.Errorf("Native: macos `createhomedir` binary is not executable: %s", c.CreatehomedirPath) } } @@ -341,8 +341,8 @@ func (c *Config) Validate() (err error) { return err } - if c.CpuAlter < 0 && int(cpu_stat) <= -c.CpuAlter { - return log.Errorf("Native: |CpuAlter| can't be more or equal the avaialble Host CPUs: |%d| > %d", c.CpuAlter, cpu_stat) + if c.CpuAlter < 0 && cpu_stat <= -c.CpuAlter { + return log.Errorf("Native: |CpuAlter| can't be more or equal the available Host CPUs: |%d| > %d", c.CpuAlter, cpu_stat) } mem_stat, err := mem.VirtualMemory() @@ -352,7 +352,7 @@ func (c *Config) Validate() (err error) { ram_stat := mem_stat.Total / 1073741824 // Getting GB from Bytes if c.RamAlter < 0 && int(ram_stat) <= -c.RamAlter { - return log.Errorf("Native: |RamAlter| can't be more or equal the avaialble Host RAM: |%d| > %d", c.RamAlter, ram_stat) + return log.Errorf("Native: |RamAlter| can't be more or equal the available Host RAM: |%d| > %d", c.RamAlter, ram_stat) } return nil @@ -363,7 +363,7 @@ func testScriptCreate(user string) (path string, err error) { path = filepath.Join("/tmp", user+"-init.sh") script := []byte("#!/bin/sh\nid\n") - return path, os.WriteFile(path, script, 0755) + return path, os.WriteFile(path, script, 0o755) // #nosec G306 } // 
Will delete the config test script diff --git a/lib/drivers/native/driver.go b/lib/drivers/native/driver.go index bc3c147..135c939 100644 --- a/lib/drivers/native/driver.go +++ b/lib/drivers/native/driver.go @@ -101,7 +101,7 @@ func (d *Driver) ValidateDefinition(def types.LabelDefinition) error { // Empty name means user home which is always exists if img.Tag != "" { found := false - for d_name, _ := range def.Resources.Disks { + for d_name := range def.Resources.Disks { if d_name == img.Tag { found = true break diff --git a/lib/drivers/native/options.go b/lib/drivers/native/options.go index 07e7fa2..9a5c3da 100644 --- a/lib/drivers/native/options.go +++ b/lib/drivers/native/options.go @@ -40,7 +40,7 @@ import ( */ type Options struct { Images []drivers.Image `json:"images"` // Optional list of image dependencies, they will be unpacked in order - //TODO: Setup string `json:"setup"` // Optional path to the executable, it will be started before the Entry with escalated priveleges + //TODO: Setup string `json:"setup"` // Optional path to the executable, it will be started before the Entry with escalated privileges Entry string `json:"entry"` // Optional path to the executable, it will be running as workload (default: init.sh / init.ps1) Groups []string `json:"groups"` // Optional user groups user should have, first one is primary (default: staff) } @@ -85,7 +85,7 @@ func (o *Options) Validate() error { // Check images var img_err error - for index, _ := range o.Images { + for index := range o.Images { if err := o.Images[index].Validate(); err != nil { img_err = log.Error("Native: Error during image validation:", err) } diff --git a/lib/drivers/native/util.go b/lib/drivers/native/util.go index 2c2fae1..8a33f17 100644 --- a/lib/drivers/native/util.go +++ b/lib/drivers/native/util.go @@ -294,7 +294,7 @@ func userRun(c *Config, env_data *EnvData, user, entry string, metadata map[stri // Prepare the command to execute entry from user home directory shell_line := 
fmt.Sprintf("source %s; %s", env_file.Name(), shellescape.Quote(shellescape.StripUnsafe(entry))) - cmd := exec.Command(c.SudoPath, "-n", c.SuPath, "-l", user, "-c", shell_line) + cmd := exec.Command(c.SudoPath, "-n", c.SuPath, "-l", user, "-c", shell_line) // #nosec G204 if env_data != nil && env_data.Disks != nil { if _, ok := env_data.Disks[""]; ok { cmd.Dir = env_data.Disks[""] @@ -332,7 +332,7 @@ func userRun(c *Config, env_data *EnvData, user, entry string, metadata map[stri } // Stop the user processes -func userStop(c *Config, user string) (out_err error) { +func userStop(c *Config, user string) (out_err error) { //nolint:unparam // In theory we can use `sysadminctl -deleteUser` command instead, which is also stopping all the // user processes and cleans up the home dir, but it asks for elevated previleges so not sure how // useful it will be in automation... @@ -525,14 +525,14 @@ func runAndLog(timeout time.Duration, stdin io.Reader, path string, arg ...strin } // Replace these for Windows, we only want to deal with Unix style line endings. 
- returnStdout := strings.Replace(stdout.String(), "\r\n", "\n", -1) - returnStderr := strings.Replace(stderr.String(), "\r\n", "\n", -1) + returnStdout := strings.ReplaceAll(stdout.String(), "\r\n", "\n") + returnStderr := strings.ReplaceAll(stderr.String(), "\r\n", "\n") return returnStdout, returnStderr, err } // Will retry on error and store the retry output and errors to return -func runAndLogRetry(retry int, timeout time.Duration, stdin io.Reader, path string, arg ...string) (stdout string, stderr string, err error) { +func runAndLogRetry(retry int, timeout time.Duration, stdin io.Reader, path string, arg ...string) (stdout string, stderr string, err error) { //nolint:unparam counter := 0 for { counter++ diff --git a/lib/drivers/test/driver.go b/lib/drivers/test/driver.go index b8f8d08..16df839 100644 --- a/lib/drivers/test/driver.go +++ b/lib/drivers/test/driver.go @@ -153,7 +153,7 @@ func (d *Driver) Allocate(def types.LabelDefinition, metadata map[string]any) (* // Generate random resource id and if exists - regenerate res := &types.Resource{} - res_file := "" + var res_file string for { res.Identifier = "test-" + crypt.RandString(6) res_file = filepath.Join(d.cfg.WorkspacePath, res.Identifier) @@ -238,7 +238,7 @@ func randomFail(name string, probability uint8) error { } // Fail on probability 1 - low, 254 - high (but still can not fail) - if uint8(rand.Intn(254)) < probability { + if uint8(rand.Intn(254)) < probability { //nolint:gosec // G402,G404 -- fine for test driver return fmt.Errorf("TEST: %s failed (%d)", name, probability) } diff --git a/lib/drivers/test/tasks.go b/lib/drivers/test/tasks.go index 0959dbd..1c63455 100644 --- a/lib/drivers/test/tasks.go +++ b/lib/drivers/test/tasks.go @@ -59,14 +59,12 @@ func (t *TaskSnapshot) Execute() (result []byte, err error) { return []byte(`{"error":"internal: invalid resource"}`), log.Error("TEST: Invalid resource:", t.Resource) } if err := randomFail(fmt.Sprintf("Snapshot %s", t.Resource.Identifier), 
t.driver.cfg.FailSnapshot); err != nil { - out, _ := json.Marshal(map[string]any{}) - return out, log.Error("TEST: RandomFail:", err) + return []byte(`{}`), log.Error("TEST: RandomFail:", err) } res_file := filepath.Join(t.driver.cfg.WorkspacePath, t.Resource.Identifier) if _, err := os.Stat(res_file); os.IsNotExist(err) { - out, _ := json.Marshal(map[string]any{}) - return out, fmt.Errorf("TEST: Unable to snapshot unavailable resource '%s'", t.Resource.Identifier) + return []byte(`{}`), fmt.Errorf("TEST: Unable to snapshot unavailable resource '%s'", t.Resource.Identifier) } return json.Marshal(map[string]any{"snapshots": []string{"test-snapshot"}, "when": t.ApplicationTask.When}) diff --git a/lib/drivers/vmx/config.go b/lib/drivers/vmx/config.go index 0f100d5..210255b 100644 --- a/lib/drivers/vmx/config.go +++ b/lib/drivers/vmx/config.go @@ -112,8 +112,8 @@ func (c *Config) Validate() (err error) { return err } - if c.CpuAlter < 0 && int(cpu_stat) <= -c.CpuAlter { - return log.Errorf("VMX: |CpuAlter| can't be more or equal the avaialble Host CPUs: |%d| > %d", c.CpuAlter, cpu_stat) + if c.CpuAlter < 0 && cpu_stat <= -c.CpuAlter { + return log.Errorf("VMX: |CpuAlter| can't be more or equal the available Host CPUs: |%d| > %d", c.CpuAlter, cpu_stat) } mem_stat, err := mem.VirtualMemory() @@ -123,7 +123,7 @@ func (c *Config) Validate() (err error) { ram_stat := mem_stat.Total / 1073741824 // Getting GB from Bytes if c.RamAlter < 0 && int(ram_stat) <= -c.RamAlter { - return log.Errorf("VMX: |RamAlter| can't be more or equal the avaialble Host RAM: |%d| > %d", c.RamAlter, ram_stat) + return log.Errorf("VMX: |RamAlter| can't be more or equal the available Host RAM: |%d| > %d", c.RamAlter, ram_stat) } return nil diff --git a/lib/drivers/vmx/options.go b/lib/drivers/vmx/options.go index 05616ad..9aa537c 100644 --- a/lib/drivers/vmx/options.go +++ b/lib/drivers/vmx/options.go @@ -45,7 +45,7 @@ func (o *Options) Apply(options util.UnparsedJson) error { func (o *Options) 
Validate() error { // Check images var img_err error - for index, _ := range o.Images { + for index := range o.Images { if err := o.Images[index].Validate(); err != nil { img_err = log.Error("VMX: Error during image validation:", err) } diff --git a/lib/drivers/vmx/util.go b/lib/drivers/vmx/util.go index 4011263..f7afda4 100644 --- a/lib/drivers/vmx/util.go +++ b/lib/drivers/vmx/util.go @@ -61,7 +61,7 @@ func (d *Driver) loadImages(opts *Options, vm_images_dir string) (string, error) log.Info("VMX: Loading the required image:", image.Name, image.Version, image.Url) // Running the background routine to download, unpack and process the image - // Success will be checked later by existance of the copied image in the vm directory + // Success will be checked later by existence of the copied image in the vm directory wg.Add(1) go func(image drivers.Image, index int) error { defer wg.Done() @@ -214,7 +214,7 @@ func (d *Driver) disksCreate(vmx_path string, disks map[string]types.ResourcesDi // Create virtual disk dmg_path := disk_path + ".dmg" - disk_type := "" + var disk_type string switch disk.Type { case "hfs+": disk_type = "HFS+" @@ -255,7 +255,7 @@ func (d *Driver) disksCreate(vmx_path string, disks map[string]types.ResourcesDi } // Umount disk (use diskutil to umount for sure) - stdout, _, err = runAndLog(10*time.Second, "/usr/sbin/diskutil", "umount", mount_point) + _, _, err = runAndLog(10*time.Second, "/usr/sbin/diskutil", "umount", mount_point) if err != nil { return log.Error("VMX: Unable to umount dmg disk:", mount_point, err) } @@ -271,7 +271,7 @@ func (d *Driver) disksCreate(vmx_path string, disks map[string]types.ResourcesDi // mounted at the same time, so avoiding to use it by using template: // `Unable to create the source raw disk: Resource deadlock avoided` // To generate template: vmware-rawdiskCreator create /dev/disk2 1 ./disk_name lsilogic - vmdk_tempalte := strings.Join([]string{ + vmdk_template := strings.Join([]string{ `# Disk DescriptorFile`, 
`version=1`, `encoding="UTF-8"`, @@ -294,7 +294,7 @@ func (d *Driver) disksCreate(vmx_path string, disks map[string]types.ResourcesDi `ddb.virtualHWVersion = "14"`, }, "\n") - if err := os.WriteFile(disk_path+"_tmp.vmdk", []byte(vmdk_tempalte), 0640); err != nil { + if err := os.WriteFile(disk_path+"_tmp.vmdk", []byte(vmdk_template), 0o640); err != nil { //nolint:gosec // G306 return log.Error("VMX: Unable to place the template vmdk file:", disk_path+"_tmp.vmdk", err) } @@ -402,14 +402,14 @@ func runAndLog(timeout time.Duration, path string, arg ...string) (string, strin } // Replace these for Windows, we only want to deal with Unix style line endings. - returnStdout := strings.Replace(stdout.String(), "\r\n", "\n", -1) - returnStderr := strings.Replace(stderr.String(), "\r\n", "\n", -1) + returnStdout := strings.ReplaceAll(stdout.String(), "\r\n", "\n") + returnStderr := strings.ReplaceAll(stderr.String(), "\r\n", "\n") return returnStdout, returnStderr, err } // Will retry on error and store the retry output and errors to return -func runAndLogRetry(retry int, timeout time.Duration, path string, arg ...string) (stdout string, stderr string, err error) { +func runAndLogRetry(retry int, timeout time.Duration, path string, arg ...string) (stdout string, stderr string, err error) { //nolint:unparam counter := 0 for { counter++ diff --git a/lib/fish/drivers.go b/lib/fish/drivers.go index 190958d..f70a55e 100644 --- a/lib/fish/drivers.go +++ b/lib/fish/drivers.go @@ -35,7 +35,7 @@ func (f *Fish) DriverGet(name string) drivers.ResourceDriver { log.Error("Fish: Resource drivers are not initialized to request the driver instance:", name) return nil } - drv, _ := drivers_instances[name] + drv := drivers_instances[name] return drv } diff --git a/lib/fish/fish.go b/lib/fish/fish.go index ec5001d..b74e3e6 100644 --- a/lib/fish/fish.go +++ b/lib/fish/fish.go @@ -15,7 +15,6 @@ package fish import ( "encoding/json" "fmt" - "math/rand" "os" "os/signal" "path" @@ -58,7 +57,7 @@ 
type Fish struct { applications_mutex sync.Mutex applications []types.ApplicationUID - // Used to temporarly store the won Votes by Application create time + // Used to temporary store the won Votes by Application create time won_votes_mutex sync.Mutex won_votes map[int64]types.Vote @@ -68,9 +67,6 @@ type Fish struct { } func New(db *gorm.DB, cfg *Config) (*Fish, error) { - // Init rand generator - rand.Seed(time.Now().UnixNano()) - f := &Fish{db: db, cfg: cfg} if err := f.Init(); err != nil { return nil, err @@ -246,14 +242,15 @@ func (f *Fish) GetLocationName() types.LocationName { return f.node.LocationName } -func (f *Fish) checkNewApplicationProcess() error { +func (f *Fish) checkNewApplicationProcess() { check_ticker := time.NewTicker(5 * time.Second) for { if !f.running { break } - select { - case <-check_ticker.C: + // TODO: Here should be select with quit in case app is stopped to not wait next ticker + <-check_ticker.C + { // Check new apps available for processing new_apps, err := f.ApplicationListGetStatusNew() if err != nil { @@ -305,7 +302,6 @@ func (f *Fish) checkNewApplicationProcess() error { f.won_votes_mutex.Unlock() } } - return nil } func (f *Fish) voteProcessRound(vote *types.Vote) error { @@ -433,7 +429,7 @@ func (f *Fish) isNodeAvailableForDefinition(def types.LabelDefinition) bool { // Verify node filters because some workload can't be running on all the physical nodes // The node becomes fitting only when all the needed node filter patterns are matched - if def.Resources.NodeFilter != nil && len(def.Resources.NodeFilter) > 0 { + if len(def.Resources.NodeFilter) > 0 { needed_idents := def.Resources.NodeFilter current_idents := f.cfg.NodeIdentifiers for _, needed := range needed_idents { @@ -625,7 +621,7 @@ func (f *Fish) executeApplication(vote types.Vote) error { log.Error("Fish: Unable to store Resource for Application:", app.UID, err) } app_state = &types.ApplicationState{ApplicationUID: app.UID, Status: types.ApplicationStatusALLOCATED, 
- Description: fmt.Sprint("Driver allocated the resource"), + Description: "Driver allocated the resource", } log.Infof("Fish: Allocated Resource %q for the Application %s", app.UID, res.Identifier) } @@ -701,7 +697,7 @@ func (f *Fish) executeApplication(vote types.Vote) error { } else { log.Info("Fish: Successful deallocation of the Application:", app.UID) app_state = &types.ApplicationState{ApplicationUID: app.UID, Status: types.ApplicationStatusDEALLOCATED, - Description: fmt.Sprint("Driver deallocated the resource"), + Description: "Driver deallocated the resource", } } // Destroying the resource anyway to not bloat the table - otherwise it will stuck there and @@ -749,7 +745,7 @@ func (f *Fish) executeApplicationTasks(drv drivers.ResourceDriver, def *types.La t := drv.GetTask(task.Task, string(task.Options)) if t == nil { log.Error("Fish: Unable to get associated driver task type for Application:", res.ApplicationUID, task.Task) - task.Result = util.UnparsedJson(`{"error":"task not availble in driver"}`) + task.Result = util.UnparsedJson(`{"error":"task not available in driver"}`) } else { // Executing the task t.SetInfo(&task, def, res) @@ -873,7 +869,7 @@ func (f *Fish) activateShutdown() { fire_shutdown <- true } case <-delay_ticker_report.C: - log.Infof("Fish: Shutdown: countdown: T-%v", delay_end_time.Sub(time.Now())) + log.Infof("Fish: Shutdown: countdown: T-%v", time.Until(delay_end_time)) case <-delay_timer.C: // Delay time has passed, triggering shutdown fire_shutdown <- true diff --git a/lib/fish/node.go b/lib/fish/node.go index b0a2f6d..9e3cefa 100644 --- a/lib/fish/node.go +++ b/lib/fish/node.go @@ -75,18 +75,17 @@ func (f *Fish) NodeGet(name string) (node *types.Node, err error) { return node, err } -func (f *Fish) pingProcess() error { +func (f *Fish) pingProcess() { // In order to optimize network & database - update just UpdatedAt field ping_ticker := time.NewTicker(types.NODE_PING_DELAY * time.Second) for { if !f.running { break } - select { - 
case <-ping_ticker.C: - log.Debug("Fish Node: ping") - f.NodePing(f.node) - } + + // TODO: Here should be select with quit in case app is stopped to not wait next ticker + <-ping_ticker.C + log.Debug("Fish Node: ping") + f.NodePing(f.node) } - return nil } diff --git a/lib/fish/vote.go b/lib/fish/vote.go index 05f9dec..0c07c7b 100644 --- a/lib/fish/vote.go +++ b/lib/fish/vote.go @@ -46,7 +46,7 @@ func (f *Fish) VoteCreate(v *types.Vote) error { return fmt.Errorf("Fish: NodeUID can't be unset") } // Update Vote Rand to be actual rand - v.Rand = rand.Uint32() + v.Rand = rand.Uint32() // #nosec G404 v.UID = f.NewUID() return f.db.Create(v).Error } diff --git a/lib/openapi/api/api_v1.go b/lib/openapi/api/api_v1.go index ccfb4da..52389e8 100644 --- a/lib/openapi/api/api_v1.go +++ b/lib/openapi/api/api_v1.go @@ -61,15 +61,25 @@ func (e *Processor) BasicAuth(username, password string, c echo.Context) (bool, } func (e *Processor) UserMeGet(c echo.Context) error { - user := c.Get("user") + user, ok := c.Get("user").(*types.User) + if !ok { + c.JSON(http.StatusBadRequest, H{"message": "Not authentified"}) + return fmt.Errorf("Not authentified") + } + // Cleanup the hash to prevent malicious activity + user.Hash = crypt.Hash{} return c.JSON(http.StatusOK, user) } func (e *Processor) UserListGet(c echo.Context, params types.UserListGetParams) error { // Only admin can list users - user := c.Get("user") - if user.(*types.User).Name != "admin" { - c.JSON(http.StatusBadRequest, H{"message": fmt.Sprintf("Only 'admin' user can list users")}) + user, ok := c.Get("user").(*types.User) + if !ok { + c.JSON(http.StatusBadRequest, H{"message": "Not authentified"}) + return fmt.Errorf("Not authentified") + } + if user.Name != "admin" { + c.JSON(http.StatusBadRequest, H{"message": "Only 'admin' user can list users"}) return fmt.Errorf("Only 'admin' user can list users") } @@ -83,9 +93,13 @@ func (e *Processor) UserListGet(c echo.Context, params types.UserListGetParams) } func (e 
*Processor) UserGet(c echo.Context, name string) error { - user := c.Get("user") - if user.(*types.User).Name != "admin" { - c.JSON(http.StatusBadRequest, H{"message": fmt.Sprintf("Only 'admin' user can get user")}) + user, ok := c.Get("user").(*types.User) + if !ok { + c.JSON(http.StatusBadRequest, H{"message": "Not authentified"}) + return fmt.Errorf("Not authentified") + } + if user.Name != "admin" { + c.JSON(http.StatusBadRequest, H{"message": "Only 'admin' user can get user"}) return fmt.Errorf("Only 'admin' user can get user") } @@ -108,11 +122,11 @@ func (e *Processor) UserCreateUpdatePost(c echo.Context) error { user, ok := c.Get("user").(*types.User) if !ok { - c.JSON(http.StatusBadRequest, H{"message": fmt.Sprintf("Not authentified")}) + c.JSON(http.StatusBadRequest, H{"message": "Not authentified"}) return fmt.Errorf("Not authentified") } if user.Name != "admin" && user.Name != data.Name { - c.JSON(http.StatusBadRequest, H{"message": fmt.Sprintf("Only 'admin' user can create user and user can update itself")}) + c.JSON(http.StatusBadRequest, H{"message": "Only 'admin' user can create user and user can update itself"}) return fmt.Errorf("Only 'admin' user can create user and user can update itself") } @@ -149,9 +163,13 @@ func (e *Processor) UserCreateUpdatePost(c echo.Context) error { func (e *Processor) UserDelete(c echo.Context, name string) error { // Only admin can delete user - user := c.Get("user") - if user.(*types.User).Name != "admin" { - c.JSON(http.StatusBadRequest, H{"message": fmt.Sprintf("Only 'admin' user can delete user")}) + user, ok := c.Get("user").(*types.User) + if !ok { + c.JSON(http.StatusBadRequest, H{"message": "Not authentified"}) + return fmt.Errorf("Not authentified") + } + if user.Name != "admin" { + c.JSON(http.StatusBadRequest, H{"message": "Only 'admin' user can delete user"}) return fmt.Errorf("Only 'admin' user can delete user") } @@ -165,9 +183,13 @@ func (e *Processor) UserDelete(c echo.Context, name string) error { 
func (e *Processor) ResourceListGet(c echo.Context, params types.ResourceListGetParams) error { // Only admin can list the resources - user := c.Get("user") - if user.(*types.User).Name != "admin" { - c.JSON(http.StatusBadRequest, H{"message": fmt.Sprintf("Only 'admin' user can list resource")}) + user, ok := c.Get("user").(*types.User) + if !ok { + c.JSON(http.StatusBadRequest, H{"message": "Not authentified"}) + return fmt.Errorf("Not authentified") + } + if user.Name != "admin" { + c.JSON(http.StatusBadRequest, H{"message": "Only 'admin' user can list resource"}) return fmt.Errorf("Only 'admin' user can list resource") } @@ -182,9 +204,13 @@ func (e *Processor) ResourceListGet(c echo.Context, params types.ResourceListGet func (e *Processor) ResourceGet(c echo.Context, uid types.ResourceUID) error { // Only admin can get the resource directly - user := c.Get("user") - if user.(*types.User).Name != "admin" { - c.JSON(http.StatusBadRequest, H{"message": fmt.Sprintf("Only 'admin' user can get resource")}) + user, ok := c.Get("user").(*types.User) + if !ok { + c.JSON(http.StatusBadRequest, H{"message": "Not authentified"}) + return fmt.Errorf("Not authentified") + } + if user.Name != "admin" { + c.JSON(http.StatusBadRequest, H{"message": "Only 'admin' user can get resource"}) return fmt.Errorf("Only 'admin' user can get resource") } @@ -198,18 +224,32 @@ func (e *Processor) ResourceGet(c echo.Context, uid types.ResourceUID) error { } func (e *Processor) ResourceAccessPut(c echo.Context, uid types.ResourceUID) error { - // NOTE: `user` is already defined / non-nil. 
- user := c.Get("user") + user, ok := c.Get("user").(*types.User) + if !ok { + c.JSON(http.StatusBadRequest, H{"message": "Not authentified"}) + return fmt.Errorf("Not authentified") + } - resource, err := e.fish.ResourceGet(uid) + res, err := e.fish.ResourceGet(uid) if err != nil { c.JSON(http.StatusNotFound, H{"message": fmt.Sprintf("Resource not found: %v", err)}) return fmt.Errorf("Resource not found: %w", err) } + // Only the owner and admin can create access for application resource + app, err := e.fish.ApplicationGet(res.ApplicationUID) + if err != nil { + c.JSON(http.StatusBadRequest, H{"message": fmt.Sprintf("Unable to find the Application: %s", res.ApplicationUID)}) + return fmt.Errorf("Unable to find the Application: %s, %w", res.ApplicationUID, err) + } + if app.OwnerName != user.Name && user.Name != "admin" { + c.JSON(http.StatusBadRequest, H{"message": "Only the owner & admin can assign service mapping to the Application"}) + return fmt.Errorf("Only the owner & admin can assign service mapping to the Application") + } + r_access := types.ResourceAccess{ - ResourceUID: resource.UID, - Username: user.(*types.User).Name, + ResourceUID: res.UID, + Username: user.Name, Password: crypt.RandString(64), } e.fish.ResourceAccessCreate(&r_access) @@ -225,11 +265,15 @@ func (e *Processor) ApplicationListGet(c echo.Context, params types.ApplicationL } // Filter the output by owner - user := c.Get("user") - if user.(*types.User).Name != "admin" { + user, ok := c.Get("user").(*types.User) + if !ok { + c.JSON(http.StatusBadRequest, H{"message": "Not authentified"}) + return fmt.Errorf("Not authentified") + } + if user.Name != "admin" { var owner_out []types.Application for _, app := range out { - if app.OwnerName == user.(*types.User).Name { + if app.OwnerName == user.Name { owner_out = append(owner_out, app) } } @@ -247,9 +291,13 @@ func (e *Processor) ApplicationGet(c echo.Context, uid types.ApplicationUID) err } // Only the owner of the application (or admin) can 
request it - user := c.Get("user") - if app.OwnerName != user.(*types.User).Name && user.(*types.User).Name != "admin" { - c.JSON(http.StatusBadRequest, H{"message": fmt.Sprintf("Only the owner and admin can request the Application")}) + user, ok := c.Get("user").(*types.User) + if !ok { + c.JSON(http.StatusBadRequest, H{"message": "Not authentified"}) + return fmt.Errorf("Not authentified") + } + if app.OwnerName != user.Name && user.Name != "admin" { + c.JSON(http.StatusBadRequest, H{"message": "Only the owner and admin can request the Application"}) return fmt.Errorf("Only the owner and admin can request the Application") } @@ -264,8 +312,12 @@ func (e *Processor) ApplicationCreatePost(c echo.Context) error { } // Set the User field out of the authorized user - user := c.Get("user") - data.OwnerName = user.(*types.User).Name + user, ok := c.Get("user").(*types.User) + if !ok { + c.JSON(http.StatusBadRequest, H{"message": "Not authentified"}) + return fmt.Errorf("Not authentified") + } + data.OwnerName = user.Name if err := e.fish.ApplicationCreate(&data); err != nil { c.JSON(http.StatusBadRequest, H{"message": fmt.Sprintf("Unable to create application: %v", err)}) @@ -283,9 +335,13 @@ func (e *Processor) ApplicationResourceGet(c echo.Context, uid types.Application } // Only the owner of the application (or admin) can request the resource - user := c.Get("user") - if app.OwnerName != user.(*types.User).Name && user.(*types.User).Name != "admin" { - c.JSON(http.StatusBadRequest, H{"message": fmt.Sprintf("Only the owner and admin can request the Application resource")}) + user, ok := c.Get("user").(*types.User) + if !ok { + c.JSON(http.StatusBadRequest, H{"message": "Not authentified"}) + return fmt.Errorf("Not authentified") + } + if app.OwnerName != user.Name && user.Name != "admin" { + c.JSON(http.StatusBadRequest, H{"message": "Only the owner and admin can request the Application resource"}) return fmt.Errorf("Only the owner and admin can request the 
Application resource") } @@ -306,9 +362,13 @@ func (e *Processor) ApplicationStateGet(c echo.Context, uid types.ApplicationUID } // Only the owner of the application (or admin) can request the status - user := c.Get("user") - if app.OwnerName != user.(*types.User).Name && user.(*types.User).Name != "admin" { - c.JSON(http.StatusBadRequest, H{"message": fmt.Sprintf("Only the owner and admin can request the Application status")}) + user, ok := c.Get("user").(*types.User) + if !ok { + c.JSON(http.StatusBadRequest, H{"message": "Not authentified"}) + return fmt.Errorf("Not authentified") + } + if app.OwnerName != user.Name && user.Name != "admin" { + c.JSON(http.StatusBadRequest, H{"message": "Only the owner and admin can request the Application status"}) return fmt.Errorf("Only the owner and admin can request the Application status") } @@ -329,9 +389,13 @@ func (e *Processor) ApplicationTaskListGet(c echo.Context, app_uid types.Applica } // Only the owner of the application (or admin) could get the tasks - user := c.Get("user") - if app.OwnerName != user.(*types.User).Name && user.(*types.User).Name != "admin" { - c.JSON(http.StatusBadRequest, H{"message": fmt.Sprintf("Only the owner of Application & admin can get the Application Tasks")}) + user, ok := c.Get("user").(*types.User) + if !ok { + c.JSON(http.StatusBadRequest, H{"message": "Not authentified"}) + return fmt.Errorf("Not authentified") + } + if app.OwnerName != user.Name && user.Name != "admin" { + c.JSON(http.StatusBadRequest, H{"message": "Only the owner of Application & admin can get the Application Tasks"}) return fmt.Errorf("Only the owner of Application & admin can get the Application Tasks") } @@ -352,9 +416,13 @@ func (e *Processor) ApplicationTaskCreatePost(c echo.Context, app_uid types.Appl } // Only the owner of the application (or admin) could create the tasks - user := c.Get("user") - if app.OwnerName != user.(*types.User).Name && user.(*types.User).Name != "admin" { - 
c.JSON(http.StatusBadRequest, H{"message": fmt.Sprintf("Only the owner of Application & admin can create the Application Tasks")}) + user, ok := c.Get("user").(*types.User) + if !ok { + c.JSON(http.StatusBadRequest, H{"message": "Not authentified"}) + return fmt.Errorf("Not authentified") + } + if app.OwnerName != user.Name && user.Name != "admin" { + c.JSON(http.StatusBadRequest, H{"message": "Only the owner of Application & admin can create the Application Tasks"}) return fmt.Errorf("Only the owner of Application & admin can create the Application Tasks") } @@ -389,9 +457,13 @@ func (e *Processor) ApplicationTaskGet(c echo.Context, task_uid types.Applicatio } // Only the owner of the application (or admin) could get the attached task - user := c.Get("user") - if app.OwnerName != user.(*types.User).Name && user.(*types.User).Name != "admin" { - c.JSON(http.StatusBadRequest, H{"message": fmt.Sprintf("Only the owner of Application & admin can get the ApplicationTask")}) + user, ok := c.Get("user").(*types.User) + if !ok { + c.JSON(http.StatusBadRequest, H{"message": "Not authentified"}) + return fmt.Errorf("Not authentified") + } + if app.OwnerName != user.Name && user.Name != "admin" { + c.JSON(http.StatusBadRequest, H{"message": "Only the owner of Application & admin can get the ApplicationTask"}) return fmt.Errorf("Only the owner of Application & admin can get the ApplicationTask") } @@ -406,9 +478,13 @@ func (e *Processor) ApplicationDeallocateGet(c echo.Context, uid types.Applicati } // Only the owner of the application (or admin) could deallocate it - user := c.Get("user") - if app.OwnerName != user.(*types.User).Name && user.(*types.User).Name != "admin" { - c.JSON(http.StatusBadRequest, H{"message": fmt.Sprintf("Only the owner & admin can deallocate the Application resource")}) + user, ok := c.Get("user").(*types.User) + if !ok { + c.JSON(http.StatusBadRequest, H{"message": "Not authentified"}) + return fmt.Errorf("Not authentified") + } + if app.OwnerName 
!= user.Name && user.Name != "admin" { + c.JSON(http.StatusBadRequest, H{"message": "Only the owner & admin can deallocate the Application resource"}) return fmt.Errorf("Only the owner & admin can deallocate the Application resource") } @@ -428,7 +504,7 @@ func (e *Processor) ApplicationDeallocateGet(c echo.Context, uid types.Applicati new_status = types.ApplicationStatusRECALLED } as := &types.ApplicationState{ApplicationUID: uid, Status: new_status, - Description: fmt.Sprintf("Requested by user %s", user.(*types.User).Name), + Description: fmt.Sprintf("Requested by user %s", user.Name), } err = e.fish.ApplicationStateCreate(as) if err != nil { @@ -461,9 +537,13 @@ func (e *Processor) LabelGet(c echo.Context, uid types.LabelUID) error { func (e *Processor) LabelCreatePost(c echo.Context) error { // Only admin can create label - user := c.Get("user") - if user.(*types.User).Name != "admin" { - c.JSON(http.StatusBadRequest, H{"message": fmt.Sprintf("Only 'admin' user can create label")}) + user, ok := c.Get("user").(*types.User) + if !ok { + c.JSON(http.StatusBadRequest, H{"message": "Not authentified"}) + return fmt.Errorf("Not authentified") + } + if user.Name != "admin" { + c.JSON(http.StatusBadRequest, H{"message": "Only 'admin' user can create label"}) return fmt.Errorf("Only 'admin' user can create label") } @@ -482,9 +562,13 @@ func (e *Processor) LabelCreatePost(c echo.Context) error { func (e *Processor) LabelDelete(c echo.Context, uid types.LabelUID) error { // Only admin can delete label - user := c.Get("user") - if user.(*types.User).Name != "admin" { - c.JSON(http.StatusBadRequest, H{"message": fmt.Sprintf("Only 'admin' user can delete Label")}) + user, ok := c.Get("user").(*types.User) + if !ok { + c.JSON(http.StatusBadRequest, H{"message": "Not authentified"}) + return fmt.Errorf("Not authentified") + } + if user.Name != "admin" { + c.JSON(http.StatusBadRequest, H{"message": "Only 'admin' user can delete Label"}) return fmt.Errorf("Only 'admin' user 
can delete label") } @@ -514,9 +598,13 @@ func (e *Processor) NodeThisGet(c echo.Context) error { } func (e *Processor) NodeThisMaintenanceGet(c echo.Context, params types.NodeThisMaintenanceGetParams) error { - user := c.Get("user") - if user.(*types.User).Name != "admin" { - c.JSON(http.StatusBadRequest, H{"message": fmt.Sprintf("Only 'admin' can set node maintenance")}) + user, ok := c.Get("user").(*types.User) + if !ok { + c.JSON(http.StatusBadRequest, H{"message": "Not authentified"}) + return fmt.Errorf("Not authentified") + } + if user.Name != "admin" { + c.JSON(http.StatusBadRequest, H{"message": "Only 'admin' can set node maintenance"}) return fmt.Errorf("Only 'admin' user can set node maintenance") } @@ -550,11 +638,14 @@ func (e *Processor) NodeThisProfilingIndexGet(c echo.Context) error { } func (e *Processor) NodeThisProfilingGet(c echo.Context, handler string) error { - user := c.Get("user") - if user.(*types.User).Name != "admin" { - message := "Only 'admin' can see profiling info" - c.JSON(http.StatusBadRequest, H{"message": message}) - return fmt.Errorf(message) + user, ok := c.Get("user").(*types.User) + if !ok { + c.JSON(http.StatusBadRequest, H{"message": "Not authentified"}) + return fmt.Errorf("Not authentified") + } + if user.Name != "admin" { + c.JSON(http.StatusBadRequest, H{"message": "Only 'admin' can see profiling info"}) + return fmt.Errorf("Only 'admin' can see profiling info") } switch handler { @@ -573,18 +664,21 @@ func (e *Processor) NodeThisProfilingGet(c echo.Context, handler string) error { case "trace": pprof.Trace(c.Response(), c.Request()) default: - message := "Unable to find requested profiling handler" - c.JSON(http.StatusNotFound, H{"message": message}) - return fmt.Errorf(message) + c.JSON(http.StatusNotFound, H{"message": "Unable to find requested profiling handler"}) + return fmt.Errorf("Unable to find requested profiling handler") } return nil } func (e *Processor) VoteListGet(c echo.Context, params 
types.VoteListGetParams) error { - user := c.Get("user") - if user.(*types.User).Name != "admin" { - c.JSON(http.StatusBadRequest, H{"message": fmt.Sprintf("Only 'admin' user can get votes")}) + user, ok := c.Get("user").(*types.User) + if !ok { + c.JSON(http.StatusBadRequest, H{"message": "Not authentified"}) + return fmt.Errorf("Not authentified") + } + if user.Name != "admin" { + c.JSON(http.StatusBadRequest, H{"message": "Only 'admin' user can get votes"}) return fmt.Errorf("Only 'admin' user can get votes") } @@ -598,9 +692,13 @@ func (e *Processor) VoteListGet(c echo.Context, params types.VoteListGetParams) } func (e *Processor) LocationListGet(c echo.Context, params types.LocationListGetParams) error { - user := c.Get("user") - if user.(*types.User).Name != "admin" { - c.JSON(http.StatusBadRequest, H{"message": fmt.Sprintf("Only 'admin' user can get locations")}) + user, ok := c.Get("user").(*types.User) + if !ok { + c.JSON(http.StatusBadRequest, H{"message": "Not authentified"}) + return fmt.Errorf("Not authentified") + } + if user.Name != "admin" { + c.JSON(http.StatusBadRequest, H{"message": "Only 'admin' user can get locations"}) return fmt.Errorf("Only 'admin' user can get locations") } @@ -614,9 +712,13 @@ func (e *Processor) LocationListGet(c echo.Context, params types.LocationListGet } func (e *Processor) LocationCreatePost(c echo.Context) error { - user := c.Get("user") - if user.(*types.User).Name != "admin" { - c.JSON(http.StatusBadRequest, H{"message": fmt.Sprintf("Only 'admin' user can create location")}) + user, ok := c.Get("user").(*types.User) + if !ok { + c.JSON(http.StatusBadRequest, H{"message": "Not authentified"}) + return fmt.Errorf("Not authentified") + } + if user.Name != "admin" { + c.JSON(http.StatusBadRequest, H{"message": "Only 'admin' user can create location"}) return fmt.Errorf("Only 'admin' user can create location") } @@ -635,9 +737,13 @@ func (e *Processor) LocationCreatePost(c echo.Context) error { } func (e *Processor) 
ServiceMappingGet(c echo.Context, uid types.ServiceMappingUID) error { - user := c.Get("user") - if user.(*types.User).Name != "admin" { - c.JSON(http.StatusBadRequest, H{"message": fmt.Sprintf("Only 'admin' user can get service mapping")}) + user, ok := c.Get("user").(*types.User) + if !ok { + c.JSON(http.StatusBadRequest, H{"message": "Not authentified"}) + return fmt.Errorf("Not authentified") + } + if user.Name != "admin" { + c.JSON(http.StatusBadRequest, H{"message": "Only 'admin' user can get service mapping"}) return fmt.Errorf("Only 'admin' user can get service mapping") } @@ -651,9 +757,13 @@ func (e *Processor) ServiceMappingGet(c echo.Context, uid types.ServiceMappingUI } func (e *Processor) ServiceMappingListGet(c echo.Context, params types.ServiceMappingListGetParams) error { - user := c.Get("user") - if user.(*types.User).Name != "admin" { - c.JSON(http.StatusBadRequest, H{"message": fmt.Sprintf("Only 'admin' user can get service mappings")}) + user, ok := c.Get("user").(*types.User) + if !ok { + c.JSON(http.StatusBadRequest, H{"message": "Not authentified"}) + return fmt.Errorf("Not authentified") + } + if user.Name != "admin" { + c.JSON(http.StatusBadRequest, H{"message": "Only 'admin' user can get service mappings"}) return fmt.Errorf("Only 'admin' user can get service mappings") } @@ -673,7 +783,11 @@ func (e *Processor) ServiceMappingCreatePost(c echo.Context) error { return fmt.Errorf("Wrong request body: %w", err) } - user := c.Get("user") + user, ok := c.Get("user").(*types.User) + if !ok { + c.JSON(http.StatusBadRequest, H{"message": "Not authentified"}) + return fmt.Errorf("Not authentified") + } if data.ApplicationUID != uuid.Nil { // Only the owner and admin can create servicemapping for his application app, err := e.fish.ApplicationGet(data.ApplicationUID) @@ -682,12 +796,12 @@ func (e *Processor) ServiceMappingCreatePost(c echo.Context) error { return fmt.Errorf("Unable to find the Application: %s, %w", data.ApplicationUID, err) } - if 
app.OwnerName != user.(*types.User).Name && user.(*types.User).Name != "admin" { - c.JSON(http.StatusBadRequest, H{"message": fmt.Sprintf("Only the owner & admin can assign service mapping to the Application")}) + if app.OwnerName != user.Name && user.Name != "admin" { + c.JSON(http.StatusBadRequest, H{"message": "Only the owner & admin can assign service mapping to the Application"}) return fmt.Errorf("Only the owner & admin can assign service mapping to the Application") } - } else if user.(*types.User).Name != "admin" { - c.JSON(http.StatusBadRequest, H{"message": fmt.Sprintf("Only 'admin' user can create service mapping with undefined Application")}) + } else if user.Name != "admin" { + c.JSON(http.StatusBadRequest, H{"message": "Only 'admin' user can create service mapping with undefined Application"}) return fmt.Errorf("Only 'admin' user can create service mapping with undefined Application") } @@ -701,9 +815,13 @@ func (e *Processor) ServiceMappingCreatePost(c echo.Context) error { func (e *Processor) ServiceMappingDelete(c echo.Context, uid types.ServiceMappingUID) error { // Only admin can delete ServiceMapping - user := c.Get("user") - if user.(*types.User).Name != "admin" { - c.JSON(http.StatusBadRequest, H{"message": fmt.Sprintf("Only 'admin' user can delete service mapping")}) + user, ok := c.Get("user").(*types.User) + if !ok { + c.JSON(http.StatusBadRequest, H{"message": "Not authentified"}) + return fmt.Errorf("Not authentified") + } + if user.Name != "admin" { + c.JSON(http.StatusBadRequest, H{"message": "Only 'admin' user can delete service mapping"}) return fmt.Errorf("Only 'admin' user can delete service mapping") } diff --git a/lib/openapi/cluster/client.go b/lib/openapi/cluster/client.go deleted file mode 100644 index e78b7e7..0000000 --- a/lib/openapi/cluster/client.go +++ /dev/null @@ -1,135 +0,0 @@ -/** - * Copyright 2021 Adobe. All rights reserved. 
- * This file is licensed to you under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. You may obtain a copy - * of the License at http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under - * the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS - * OF ANY KIND, either express or implied. See the License for the specific language - * governing permissions and limitations under the License. - */ - -package cluster - -import ( - "bytes" - "fmt" - "time" - - "github.com/gorilla/websocket" - // "github.com/adobe/aquarium-fish/lib/cluster/msg" -) - -const ( - // Time allowed to write a message to the peer. - writeWait = 10 * time.Second - - // Time allowed to read the next pong message from the peer. - pongWait = 60 * time.Second - - // Send pings to peer with this period. Must be less than pongWait. - pingPeriod = (pongWait * 9) / 10 - - // Maximum message size allowed from peer. - maxMessageSize = 512 -) - -var ( - newline = []byte{'\n'} - space = []byte{' '} -) - -// Client is a middleman between the websocket connection and the hub. -type Client struct { - hub *Hub - - // The websocket connection. - conn *websocket.Conn - - // Buffered channel of outbound messages. - send chan []byte -} - -// readPump pumps messages from the websocket connection to the hub. -// -// The application runs readPump in a per-connection goroutine. The application -// ensures that there is at most one reader on a connection by executing all -// reads from this goroutine. 
-func (c *Client) readPump() { - defer func() { - c.hub.unregister <- c - c.conn.Close() - }() - c.conn.SetReadLimit(maxMessageSize) - c.conn.SetReadDeadline(time.Now().Add(pongWait)) - c.conn.SetPongHandler(func(string) error { c.conn.SetReadDeadline(time.Now().Add(pongWait)); return nil }) - for { - _, message, err := c.conn.ReadMessage() - if err != nil { - if websocket.IsUnexpectedCloseError(err, websocket.CloseGoingAway, websocket.CloseAbnormalClosure) { - fmt.Printf("Cluster: Client %v readPump: reading error\n", c.conn.RemoteAddr()) - } - break - } - message = bytes.TrimSpace(bytes.Replace(message, newline, space, -1)) - //fmt.Printf("Cluster: Client %v readPump: got: %s\n", c.conn.RemoteAddr(), message) - c.hub.broadcast <- message - } -} - -// writePump pumps messages from the hub to the websocket connection. -// -// A goroutine running writePump is started for each connection. The -// application ensures that there is at most one writer to a connection by -// executing all writes from this goroutine. -func (c *Client) writePump() { - ticker := time.NewTicker(pingPeriod) - defer func() { - ticker.Stop() - c.conn.Close() - }() - for { - select { - case message, ok := <-c.send: - c.conn.SetWriteDeadline(time.Now().Add(writeWait)) - if !ok { - // The hub closed the channel. - c.conn.WriteMessage(websocket.CloseMessage, []byte{}) - return - } - - w, err := c.conn.NextWriter(websocket.TextMessage) - if err != nil { - return - } - w.Write(message) - - // Add queued chat messages to the current websocket message. 
- n := len(c.send) - for i := 0; i < n; i++ { - w.Write(newline) - w.Write(<-c.send) - } - - if err := w.Close(); err != nil { - return - } - case <-ticker.C: - c.conn.SetWriteDeadline(time.Now().Add(writeWait)) - if err := c.conn.WriteMessage(websocket.PingMessage, nil); err != nil { - return - } - } - } -} - -// Init the connection by send the active data to just connected node -func (c *Client) init() error { - // TODO - //active_nodes := msg.NewNodes() - //active_apps := msg.NewApplications() - //last_votes := msg.NewVotes() - //c.conn.Write() - return nil -} diff --git a/lib/openapi/cluster/cluster_v1.go b/lib/openapi/cluster/cluster_v1.go deleted file mode 100644 index 220e789..0000000 --- a/lib/openapi/cluster/cluster_v1.go +++ /dev/null @@ -1,126 +0,0 @@ -/** - * Copyright 2021 Adobe. All rights reserved. - * This file is licensed to you under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. You may obtain a copy - * of the License at http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under - * the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS - * OF ANY KIND, either express or implied. See the License for the specific language - * governing permissions and limitations under the License. 
- */ - -package cluster - -import ( - "crypto/x509" - "fmt" - "net/http" - - "github.com/gorilla/websocket" - "github.com/labstack/echo/v4" - - "github.com/adobe/aquarium-fish/lib/cluster" - "github.com/adobe/aquarium-fish/lib/fish" - "github.com/adobe/aquarium-fish/lib/log" -) - -// H is a shortcut for map[string]any -type H map[string]any - -type Processor struct { - fish *fish.Fish - upgrader websocket.Upgrader - - hub *Hub -} - -func NewV1Router(e *echo.Echo, fish *fish.Fish, cl *cluster.Cluster) { - hub := &Hub{ - broadcast: make(chan []byte), - register: make(chan *Client), - unregister: make(chan *Client), - clients: make(map[*Client]bool), - } - go hub.Run() - proc := &Processor{ - fish: fish, - upgrader: websocket.Upgrader{ - EnableCompression: true, - }, - hub: hub, - } - router := e.Group("") - router.Use( - // The connected client should have valid cluster signed certificate - proc.ClientCertAuth, - ) - router.GET("/cluster/v1/connect", proc.ClusterConnect) -} - -func (e *Processor) ClientCertAuth(next echo.HandlerFunc) echo.HandlerFunc { - return func(c echo.Context) error { - // The connecting client should have the valid to cluster CA certificate, with the CN of - // the node name, pubkey need be the same as stored (or first time registration) in cluster - // nodes table and the time of last ping need to be more than ping delay time x2 - - if len(c.Request().TLS.PeerCertificates) == 0 { - return echo.NewHTTPError(http.StatusUnauthorized, "Client certificate is not provided") - } - - var valid_client_cert *x509.Certificate - for _, crt := range c.Request().TLS.PeerCertificates { - // Validation over cluster CA cert - opts := x509.VerifyOptions{ - Roots: c.Echo().TLSServer.TLSConfig.ClientCAs, - KeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}, - } - _, err := crt.Verify(opts) - if err != nil { - log.Warn(fmt.Sprintf("Cluster: Client %s (%s) certificate CA verify failed:", - crt.Subject.CommonName, c.RealIP()), err) - continue - } - - // TODO: 
Check the node in db by CA as NodeName and if exists compare the pubkey - log.Debug("Cluster: Client certificate CN:", crt.Subject.CommonName) - der, err := x509.MarshalPKIXPublicKey(crt.PublicKey) - if err != nil { - continue - } - log.Debug("Cluster: Client certificate pubkey der:", der) - - valid_client_cert = crt - } - - if valid_client_cert == nil { - return echo.NewHTTPError(http.StatusUnauthorized, "Client certificate is invalid") - } - - c.Set("client_cert", valid_client_cert) - - //res, err := e.fish.ResourceGetByIP(c.RealIP()) - //if err != nil { - // return echo.NewHTTPError(http.StatusUnauthorized, "Client IP was not found in the node Resources") - //} - - return next(c) - } -} - -func (e *Processor) ClusterConnect(c echo.Context) error { - ws, err := e.upgrader.Upgrade(c.Response(), c.Request(), nil) - if err != nil { - c.JSON(http.StatusBadRequest, H{"message": fmt.Sprintf("Unable to connect with the cluster: %v", err)}) - return fmt.Errorf("Unable to connect with the cluster: %w", err) - } - client := &Client{hub: e.hub, conn: ws, send: make(chan []byte, 256)} - e.hub.register <- client - - // Starting the new connected client processes - go client.writePump() - go client.readPump() - go client.init() - - return nil -} diff --git a/lib/openapi/cluster/hub.go b/lib/openapi/cluster/hub.go deleted file mode 100644 index 7c79551..0000000 --- a/lib/openapi/cluster/hub.go +++ /dev/null @@ -1,57 +0,0 @@ -/** - * Copyright 2021 Adobe. All rights reserved. - * This file is licensed to you under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. You may obtain a copy - * of the License at http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under - * the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS - * OF ANY KIND, either express or implied. 
See the License for the specific language - * governing permissions and limitations under the License. - */ - -package cluster - -import ( - "fmt" -) - -// Hub maintains the set of active clients and broadcasts messages to the -// clients. -type Hub struct { - // Registered clients. - clients map[*Client]bool - - // Inbound messages from the clients. - broadcast chan []byte - - // Register requests from the clients. - register chan *Client - - // Unregister requests from clients. - unregister chan *Client -} - -func (h *Hub) Run() { - for { - select { - case client := <-h.register: - h.clients[client] = true - case client := <-h.unregister: - if _, ok := h.clients[client]; ok { - delete(h.clients, client) - close(client.send) - fmt.Println("Cluster: Hub: connection closed") - } - case <-h.broadcast: - for client := range h.clients { - select { - case client.send <- []byte("acknowledge"): - default: - close(client.send) - delete(h.clients, client) - } - } - } - } -} diff --git a/lib/openapi/openapi.go b/lib/openapi/openapi.go index aeda097..81eca76 100644 --- a/lib/openapi/openapi.go +++ b/lib/openapi/openapi.go @@ -33,11 +33,9 @@ import ( _ "github.com/oapi-codegen/oapi-codegen/v2/pkg/util" "gopkg.in/yaml.v3" - "github.com/adobe/aquarium-fish/lib/cluster" "github.com/adobe/aquarium-fish/lib/fish" "github.com/adobe/aquarium-fish/lib/log" "github.com/adobe/aquarium-fish/lib/openapi/api" - cluster_server "github.com/adobe/aquarium-fish/lib/openapi/cluster" "github.com/adobe/aquarium-fish/lib/openapi/meta" ) @@ -66,7 +64,7 @@ func (cb *YamlBinder) Bind(i any, c echo.Context) (err error) { return } -func Init(fish *fish.Fish, cl *cluster.Cluster, api_address, ca_path, cert_path, key_path string) (*http.Server, error) { +func Init(fish *fish.Fish, api_address, ca_path, cert_path, key_path string) (*http.Server, error) { swagger, err := GetSwagger() if err != nil { return nil, fmt.Errorf("Fish OpenAPI: Error loading swagger spec: %w", err) @@ -86,9 +84,8 @@ func Init(fish 
*fish.Fish, cl *cluster.Cluster, api_address, ca_path, cert_path, router.HideBanner = true // TODO: Probably it will be a feature an ability to separate those - // routers to independance ports if needed + // routers to independence ports if needed meta.NewV1Router(router, fish) - cluster_server.NewV1Router(router, fish, cl) api.NewV1Router(router, fish) // TODO: web UI router @@ -98,11 +95,10 @@ func Init(fish *fish.Fish, cl *cluster.Cluster, api_address, ca_path, cert_path, } s := router.TLSServer s.Addr = api_address - s.TLSConfig = &tls.Config{ + s.TLSConfig = &tls.Config{ // #nosec G402 , keep the compatibility high since not public access ClientAuth: tls.RequestClientCert, // Need for the client certificate auth ClientCAs: ca_pool, // Verify client certificate with the cluster CA } - s.TLSConfig.BuildNameToCertificate() errChan := make(chan error) go func() { addr := s.Addr diff --git a/lib/openapi/types/node.go b/lib/openapi/types/node.go index 58e2f06..6957147 100644 --- a/lib/openapi/types/node.go +++ b/lib/openapi/types/node.go @@ -22,7 +22,7 @@ import ( const NODE_PING_DELAY = 10 -var NodePingDuplicationErr = fmt.Errorf("Fish Node: Unable to join the Aquarium cluster due to " + +var ErrNodePingDuplication = fmt.Errorf("Fish Node: Unable to join the Aquarium cluster due to " + "the node with the same name pinged the cluster less then 2xNODE_PING_DELAY time ago") func (n *Node) Init(node_address, cert_path string) error { @@ -51,7 +51,7 @@ func (n *Node) Init(node_address, cert_path string) error { n.Pubkey = &pubkey_der } else { // Validate the existing pubkey - if bytes.Compare(*n.Pubkey, pubkey_der) != 0 { + if !bytes.Equal(*n.Pubkey, pubkey_der) { return fmt.Errorf("Fish Node: The pubkey was changed for Node, that's not supported") } } diff --git a/lib/openapi/types/resources.go b/lib/openapi/types/resources.go index b735b60..e39d4d1 100644 --- a/lib/openapi/types/resources.go +++ b/lib/openapi/types/resources.go @@ -62,7 +62,7 @@ func (r *Resources) 
Validate(disk_types []string, check_net bool) error { return fmt.Errorf("Resources: Disk name can't be empty") } if len(disk_types) > 0 && !util.Contains(disk_types, disk.Type) { - return fmt.Errorf(fmt.Sprintf("Resources: Type of disk must be one of: %+q", disk_types)) + return fmt.Errorf("Resources: Type of disk must be one of: %+q", disk_types) } if disk.Size < 1 { return fmt.Errorf("Resources: Size of the disk can't be less than 1GB") diff --git a/lib/proxy_ssh/proxy.go b/lib/proxy_ssh/proxy.go index b81f1d1..18187b6 100644 --- a/lib/proxy_ssh/proxy.go +++ b/lib/proxy_ssh/proxy.go @@ -118,7 +118,7 @@ func (p *ProxyAccess) serveConnection(conn net.Conn, serverConfig *ssh.ServerCon Auth: []ssh.AuthMethod{ ssh.Password(resource.Authentication.Password), }, - HostKeyCallback: ssh.InsecureIgnoreHostKey(), + HostKeyCallback: ssh.InsecureIgnoreHostKey(), // #nosec G106 , remote always have new hostkey by design } remoteConn, err := ssh.Dial("tcp", remoteAddr, remoteConfig) if err != nil { diff --git a/lib/util/file_replace_token.go b/lib/util/file_replace_token.go index cf6e7d5..405eb07 100644 --- a/lib/util/file_replace_token.go +++ b/lib/util/file_replace_token.go @@ -16,7 +16,6 @@ import ( "bufio" "fmt" "io" - "io/ioutil" "os" "path/filepath" "strings" @@ -36,7 +35,7 @@ func FileReplaceToken(path string, full_line, add, anycase bool, token_values .. 
} // Open output file - out_f, err := ioutil.TempFile(filepath.Dir(path), "tmp") + out_f, err := os.CreateTemp(filepath.Dir(path), "tmp") if err != nil { return err } diff --git a/lib/util/file_replace_token_test.go b/lib/util/file_replace_token_test.go index e676ada..ebe1470 100644 --- a/lib/util/file_replace_token_test.go +++ b/lib/util/file_replace_token_test.go @@ -31,7 +31,7 @@ func Test_file_replace_token_simple_proceed(t *testing.T) { "test4 test5 test6\n" + "test7 test8 test9\n") - os.WriteFile(tmp_file, in_data, 0644) + os.WriteFile(tmp_file, in_data, 0o644) FileReplaceToken(tmp_file, false, false, false, @@ -57,7 +57,7 @@ func Test_file_replace_token_simple_skip_uppercase_src(t *testing.T) { "test4 test6\n" + "test7 test8 test9\n") - os.WriteFile(tmp_file, in_data, 0644) + os.WriteFile(tmp_file, in_data, 0o644) FileReplaceToken(tmp_file, false, false, false, @@ -83,7 +83,7 @@ func Test_file_replace_token_simple_skip_uppercase_token(t *testing.T) { "test4 test6\n" + "test7 test8 test9\n") - os.WriteFile(tmp_file, in_data, 0644) + os.WriteFile(tmp_file, in_data, 0o644) FileReplaceToken(tmp_file, false, false, false, @@ -109,7 +109,7 @@ func Test_file_replace_token_anycase_token_proceed(t *testing.T) { "test4 test5 test6\n" + "test7 test8 test9\n") - os.WriteFile(tmp_file, in_data, 0644) + os.WriteFile(tmp_file, in_data, 0o644) FileReplaceToken(tmp_file, false, false, true, @@ -135,7 +135,7 @@ func Test_file_replace_token_anycase_src_proceed(t *testing.T) { "test4 test5 test6\n" + "test7 test8 test9\n") - os.WriteFile(tmp_file, in_data, 0644) + os.WriteFile(tmp_file, in_data, 0o644) FileReplaceToken(tmp_file, false, false, true, @@ -161,7 +161,7 @@ func Test_file_replace_token_anycase_multiple(t *testing.T) { "test4 test5 test5 test6\n" + "test7 test8 test9\n") - os.WriteFile(tmp_file, in_data, 0644) + os.WriteFile(tmp_file, in_data, 0o644) FileReplaceToken(tmp_file, false, false, true, @@ -186,7 +186,7 @@ func Test_file_replace_token_add(t *testing.T) { 
"test4 test5 test6\n" + "test5\n") - os.WriteFile(tmp_file, in_data, 0644) + os.WriteFile(tmp_file, in_data, 0o644) FileReplaceToken(tmp_file, false, true, false, @@ -212,7 +212,7 @@ func Test_file_replace_token_do_not_add_if_replaced(t *testing.T) { "test4 test5 test6\n" + "test7 test8 test9\n") - os.WriteFile(tmp_file, in_data, 0644) + os.WriteFile(tmp_file, in_data, 0o644) FileReplaceToken(tmp_file, false, true, false, @@ -238,7 +238,7 @@ func Test_file_replace_token_full_line(t *testing.T) { "test5\n" + "test7 test8 test9\n") - os.WriteFile(tmp_file, in_data, 0644) + os.WriteFile(tmp_file, in_data, 0o644) FileReplaceToken(tmp_file, true, false, false, diff --git a/lib/util/file_starts_with_test.go b/lib/util/file_starts_with_test.go index 55a83aa..80cde10 100644 --- a/lib/util/file_starts_with_test.go +++ b/lib/util/file_starts_with_test.go @@ -25,7 +25,7 @@ func TestFileStartsWithGood(t *testing.T) { "test1 test2 test3\n" + "test4 test5 test6\n") - os.WriteFile(tmp_file, in_data, 0644) + os.WriteFile(tmp_file, in_data, 0o644) if err := FileStartsWith(tmp_file, []byte("test1 ")); err != nil { t.Fatalf(`FileStartsWith("test1 ") = %v, want: nil`, err) @@ -39,7 +39,7 @@ func TestFileStartsNotEqual(t *testing.T) { "test1 test2 test3\n" + "test4 test5 test6\n") - os.WriteFile(tmp_file, in_data, 0644) + os.WriteFile(tmp_file, in_data, 0o644) if err := FileStartsWith(tmp_file, []byte("test2 ")); err != ErrFileStartsWithNotEqual { t.Fatalf(`FileStartsWith("test2 ") = %v, want: %v`, err, ErrFileStartsWithNotEqual) @@ -59,7 +59,7 @@ func TestFileStartsSmall(t *testing.T) { in_data := []byte("small file\n") - os.WriteFile(tmp_file, in_data, 0644) + os.WriteFile(tmp_file, in_data, 0o644) if err := FileStartsWith(tmp_file, []byte("biiiiiiiiiig prefix")); err != ErrFileStartsWithFileTooSmall { t.Fatalf(`FileStartsWith("test2 ") = %v, want: %v`, err, ErrFileStartsWithFileTooSmall) diff --git a/lib/util/human_size.go b/lib/util/human_size.go index a6cc4d0..f3b1bd0 100644 --- 
a/lib/util/human_size.go +++ b/lib/util/human_size.go @@ -51,8 +51,8 @@ func (hs *HumanSize) UnmarshalText(data []byte) error { // Detecting unit & multiplier var mult HumanSize = 0 - unit := "" - unit_len := 0 + var unit string + var unit_len int if length > 1 { unit = input[length-2:] unit_len = 2 @@ -82,13 +82,10 @@ func (hs *HumanSize) UnmarshalText(data []byte) error { unit_len = 1 } else if unit[1] >= '0' && unit[1] <= '9' { unit_len = 0 - } else { - mult = 0 } } else { unit_len = 0 } - unit = "B" mult = B } } @@ -119,7 +116,7 @@ func (hs HumanSize) Bytes() uint64 { func (hs HumanSize) String() string { switch { case hs == 0: - return fmt.Sprint("0B") + return "0B" case hs%EB == 0: return fmt.Sprintf("%dEB", hs/EB) case hs%PB == 0: diff --git a/lib/util/lock.go b/lib/util/lock.go index a6339f2..93a196a 100644 --- a/lib/util/lock.go +++ b/lib/util/lock.go @@ -33,7 +33,8 @@ func CreateLock(lock_path string) error { } // Writing pid into the file for additional info - lock_file.Write([]byte(fmt.Sprintf("%d", os.Getpid()))) + data := []byte(fmt.Sprintf("%d", os.Getpid())) + lock_file.Write(data) lock_file.Close() return nil diff --git a/lib/util/metadata_processing.go b/lib/util/metadata_processing.go index a49f22b..5048ce6 100644 --- a/lib/util/metadata_processing.go +++ b/lib/util/metadata_processing.go @@ -16,7 +16,7 @@ func SerializeMetadata(format, prefix string, data map[string]any) (out []byte, case "env": // Plain format suitable to use in shell m := DotSerialize(prefix, data) for key, val := range m { - line := cleanShellKey(strings.Replace(shellescape.StripUnsafe(key), ".", "_", -1)) + line := cleanShellKey(strings.ReplaceAll(shellescape.StripUnsafe(key), ".", "_")) if len(line) == 0 { continue } @@ -26,7 +26,7 @@ func SerializeMetadata(format, prefix string, data map[string]any) (out []byte, case "export": // Format env with exports for easy usage with source m := DotSerialize(prefix, data) for key, val := range m { - line := 
cleanShellKey(strings.Replace(shellescape.StripUnsafe(key), ".", "_", -1)) + line := cleanShellKey(strings.ReplaceAll(shellescape.StripUnsafe(key), ".", "_")) if len(line) == 0 { continue } @@ -37,12 +37,12 @@ func SerializeMetadata(format, prefix string, data map[string]any) (out []byte, case "ps1": // Plain format suitable to use in powershell m := DotSerialize(prefix, data) for key, val := range m { - line := cleanShellKey(strings.Replace(shellescape.StripUnsafe(key), ".", "_", -1)) + line := cleanShellKey(strings.ReplaceAll(shellescape.StripUnsafe(key), ".", "_")) if len(line) == 0 { continue } // Shell quote is not applicable here, so using the custom one - value := []byte("='" + strings.Replace(val, "'", "''", -1) + "'\n") + value := []byte("='" + strings.ReplaceAll(val, "'", "''") + "'\n") out = append(out, append([]byte("$"), append(line, value...)...)...) } default: diff --git a/lib/util/passthrough_monitor.go b/lib/util/passthrough_monitor.go index 9df491e..e29bbf3 100644 --- a/lib/util/passthrough_monitor.go +++ b/lib/util/passthrough_monitor.go @@ -41,7 +41,7 @@ func (pt *PassThruMonitor) Read(p []byte) (int, error) { if n > 0 { pt.total += int64(n) percentage := float64(pt.total) / float64(pt.Length) * float64(100) - if percentage-pt.progress > 10 || time.Now().Sub(pt.print_ts) > 30*time.Second { + if percentage-pt.progress > 10 || time.Since(pt.print_ts) > 30*time.Second { // Show status every 10% or 30 sec log.Infof("%s: %v%% (%dB)", pt.Name, int(percentage), pt.total) pt.progress = percentage diff --git a/tests/helper/copy.go b/tests/helper/copy.go index aacfad8..b8552d4 100644 --- a/tests/helper/copy.go +++ b/tests/helper/copy.go @@ -26,7 +26,7 @@ func CopyFile(src, dst string) error { } defer fin.Close() - os.MkdirAll(filepath.Dir(dst), 0755) + os.MkdirAll(filepath.Dir(dst), 0o755) fout, err := os.Create(dst) if err != nil { return err diff --git a/tests/helper/fish.go b/tests/helper/fish.go index d7d0c50..cb93786 100644 --- a/tests/helper/fish.go 
+++ b/tests/helper/fish.go @@ -39,51 +39,55 @@ type AFInstance struct { } // Simple creates and run the fish node -func NewAquariumFish(t testing.TB, name, cfg string, args ...string) *AFInstance { - afi := NewAfInstance(t, name, cfg) - afi.Start(t, args...) +func NewAquariumFish(tb testing.TB, name, cfg string, args ...string) *AFInstance { + tb.Helper() + afi := NewAfInstance(tb, name, cfg) + afi.Start(tb, args...) return afi } // If you need to create instance without starting it up right away -func NewAfInstance(t testing.TB, name, cfg string) *AFInstance { - t.Log("INFO: Creating new node:", name) +func NewAfInstance(tb testing.TB, name, cfg string) *AFInstance { + tb.Helper() + tb.Log("INFO: Creating new node:", name) afi := &AFInstance{ node_name: name, } - afi.workspace = t.TempDir() - t.Log("INFO: Created workspace:", afi.node_name, afi.workspace) + afi.workspace = tb.TempDir() + tb.Log("INFO: Created workspace:", afi.node_name, afi.workspace) cfg += fmt.Sprintf("\nnode_name: %q", afi.node_name) - os.WriteFile(filepath.Join(afi.workspace, "config.yml"), []byte(cfg), 0644) - t.Log("INFO: Stored config:", cfg) + os.WriteFile(filepath.Join(afi.workspace, "config.yml"), []byte(cfg), 0o600) + tb.Log("INFO: Stored config:", cfg) return afi } // Start another node of cluster // It will automatically add cluster_join parameter to the config -func (afi1 *AFInstance) NewClusterNode(t testing.TB, name, cfg string, args ...string) *AFInstance { - afi2 := afi1.NewAfInstanceCluster(t, name, cfg) - afi2.Start(t, args...) +func (afi1 *AFInstance) NewClusterNode(tb testing.TB, name, cfg string, args ...string) *AFInstance { + tb.Helper() + afi2 := afi1.NewAfInstanceCluster(tb, name, cfg) + afi2.Start(tb, args...) 
return afi2 } // Just create the node based on the existing cluster node -func (afi1 *AFInstance) NewAfInstanceCluster(t testing.TB, name, cfg string) *AFInstance { - t.Log("INFO: Creating new cluster node with seed node:", afi1.node_name) +func (afi1 *AFInstance) NewAfInstanceCluster(tb testing.TB, name, cfg string) *AFInstance { + tb.Helper() + tb.Log("INFO: Creating new cluster node with seed node:", afi1.node_name) cfg += fmt.Sprintf("\ncluster_join: [%q]", afi1.endpoint) - afi2 := NewAfInstance(t, name, cfg) + afi2 := NewAfInstance(tb, name, cfg) // Copy seed node CA to generate valid cluster node cert if err := CopyFile(filepath.Join(afi1.workspace, "fish_data", "ca.key"), filepath.Join(afi2.workspace, "fish_data", "ca.key")); err != nil { - t.Fatalf("ERROR: Unable to copy CA key: %v", err) + tb.Fatalf("ERROR: Unable to copy CA key: %v", err) } if err := CopyFile(filepath.Join(afi1.workspace, "fish_data", "ca.crt"), filepath.Join(afi2.workspace, "fish_data", "ca.crt")); err != nil { - t.Fatalf("ERROR: Unable to copy CA crt: %v", err) + tb.Fatalf("ERROR: Unable to copy CA crt: %v", err) } return afi2 @@ -115,21 +119,24 @@ func (afi *AFInstance) IsRunning() bool { } // Restart the application -func (afi *AFInstance) Restart(t testing.TB, args ...string) { - t.Log("INFO: Restarting:", afi.node_name, afi.workspace) - afi.Stop(t) - afi.Start(t, args...) +func (afi *AFInstance) Restart(tb testing.TB, args ...string) { + tb.Helper() + tb.Log("INFO: Restarting:", afi.node_name, afi.workspace) + afi.Stop(tb) + afi.Start(tb, args...) 
} // Cleanup after the test execution -func (afi *AFInstance) Cleanup(t testing.TB) { - t.Log("INFO: Cleaning up:", afi.node_name, afi.workspace) - afi.Stop(t) +func (afi *AFInstance) Cleanup(tb testing.TB) { + tb.Helper() + tb.Log("INFO: Cleaning up:", afi.node_name, afi.workspace) + afi.Stop(tb) os.RemoveAll(afi.workspace) } // Stops the fish node executable -func (afi *AFInstance) Stop(t testing.TB) { +func (afi *AFInstance) Stop(tb testing.TB) { + tb.Helper() if afi.cmd == nil || !afi.running { return } @@ -137,7 +144,7 @@ func (afi *AFInstance) Stop(t testing.TB) { afi.cmd.Process.Signal(os.Interrupt) // Wait 10 seconds for process to stop - t.Log("INFO: Wait 10s for fish node to stop:", afi.node_name, afi.workspace) + tb.Log("INFO: Wait 10s for fish node to stop:", afi.node_name, afi.workspace) for i := 1; i < 20; i++ { if !afi.running { return @@ -150,9 +157,10 @@ func (afi *AFInstance) Stop(t testing.TB) { } // Starts the fish node executable -func (afi *AFInstance) Start(t testing.TB, args ...string) { +func (afi *AFInstance) Start(tb testing.TB, args ...string) { + tb.Helper() if afi.running { - t.Fatalf("ERROR: Fish node %q can't be started since already started", afi.node_name) + tb.Fatalf("ERROR: Fish node %q can't be started since already started", afi.node_name) return } ctx, cancel := context.WithCancel(context.Background()) @@ -172,7 +180,7 @@ func (afi *AFInstance) Start(t testing.TB, args ...string) { // Listening for log and scan for token and address for scanner.Scan() { line := scanner.Text() - t.Log(afi.node_name, line) + tb.Log(afi.node_name, line) if strings.HasPrefix(line, "Admin user pass: ") { val := strings.SplitN(strings.TrimSpace(line), "Admin user pass: ", 2) if len(val) < 2 { @@ -195,7 +203,7 @@ func (afi *AFInstance) Start(t testing.TB, args ...string) { init_done <- "" } } - t.Log("INFO: Reading of AquariumFish output is done") + tb.Log("INFO: Reading of AquariumFish output is done") }() afi.cmd.Start() @@ -207,7 +215,7 @@ func 
(afi *AFInstance) Start(t testing.TB, args ...string) { r.Close() }() if err := afi.cmd.Wait(); err != nil { - t.Log("WARN: AquariumFish process was stopped:", err) + tb.Log("WARN: AquariumFish process was stopped:", err) init_done <- fmt.Sprintf("ERROR: Fish was stopped with exit code: %v", err) } }() @@ -215,6 +223,6 @@ func (afi *AFInstance) Start(t testing.TB, args ...string) { failed := <-init_done if failed != "" { - t.Fatalf("ERROR: Failed to init node %q: %s", afi.node_name, failed) + tb.Fatalf("ERROR: Failed to init node %q: %s", afi.node_name, failed) } } diff --git a/tests/helper/t_mock.go b/tests/helper/t_mock.go index de19e45..d7b13b7 100644 --- a/tests/helper/t_mock.go +++ b/tests/helper/t_mock.go @@ -51,6 +51,7 @@ func (m *MockT) Fatalf(format string, args ...any) { } func ExpectFailure(t *testing.T, f func(tt testing.TB)) { + t.Helper() var wg sync.WaitGroup mock_t := &MockT{t: t} From 89a4a9cd694299ceb8d9ef6f7b5c6a6f6d4abf3d Mon Sep 17 00:00:00 2001 From: Jerry Wiltse Date: Fri, 6 Sep 2024 16:14:07 -0400 Subject: [PATCH 2/6] Convert all snake_case variables to camelCase per golang convention (#85) Co-authored-by: Jerry Wiltse --- cmd/fish/fish.go | 104 ++--- lib/crypt/crypt.go | 46 +-- lib/crypt/init_tls_pair_ca.go | 90 ++--- lib/drivers/aws/dedicated_pool.go | 290 +++++++------- lib/drivers/aws/driver.go | 280 +++++++------- lib/drivers/aws/task_image.go | 112 +++--- lib/drivers/aws/task_snapshot.go | 24 +- lib/drivers/aws/util.go | 302 +++++++-------- lib/drivers/docker/driver.go | 182 ++++----- lib/drivers/docker/options.go | 8 +- lib/drivers/docker/util.go | 198 +++++----- lib/drivers/driver.go | 2 +- lib/drivers/image.go | 114 +++--- lib/drivers/image_test.go | 26 +- lib/drivers/native/config.go | 26 +- lib/drivers/native/driver.go | 66 ++-- lib/drivers/native/options.go | 14 +- lib/drivers/native/util.go | 196 +++++----- lib/drivers/test/driver.go | 68 ++-- lib/drivers/test/tasks.go | 4 +- lib/drivers/vmx/config.go | 14 +- 
lib/drivers/vmx/driver.go | 134 +++---- lib/drivers/vmx/options.go | 8 +- lib/drivers/vmx/util.go | 198 +++++----- lib/fish/application.go | 8 +- lib/fish/application_state.go | 4 +- lib/fish/application_task.go | 8 +- lib/fish/config.go | 6 +- lib/fish/drivers.go | 22 +- lib/fish/fish.go | 360 +++++++++--------- lib/fish/label.go | 4 +- lib/fish/location.go | 4 +- lib/fish/node.go | 10 +- lib/fish/resource.go | 24 +- lib/fish/resource_access.go | 4 +- lib/fish/servicemapping.go | 4 +- lib/fish/user.go | 6 +- lib/fish/vote.go | 20 +- lib/openapi/api/api_v1.go | 58 +-- lib/openapi/meta/meta_v1.go | 4 +- lib/openapi/openapi.go | 14 +- lib/openapi/types/node.go | 16 +- lib/openapi/types/resources.go | 12 +- lib/proxy_socks/proxy.go | 8 +- lib/proxy_ssh/proxy.go | 30 +- lib/util/dot_serialize.go | 8 +- lib/util/expression_sql_filter_test.go | 4 +- lib/util/file_replace_block.go | 40 +- lib/util/file_replace_token.go | 40 +- lib/util/file_replace_token_test.go | 144 +++---- lib/util/file_starts_with.go | 8 +- lib/util/file_starts_with_test.go | 28 +- lib/util/human_size.go | 16 +- lib/util/human_size_test.go | 4 +- lib/util/lock.go | 34 +- lib/util/passthrough_monitor.go | 6 +- lib/util/streamlog_monitor.go | 8 +- lib/util/unparsed_json.go | 4 +- tests/allocate_apps_stress_test.go | 4 +- tests/allocate_multidefinition_label_test.go | 38 +- .../application_task_notexisting_fail_test.go | 36 +- .../application_task_snapshot_by_user_test.go | 64 ++-- tests/cant_allocate_too_big_label_test.go | 32 +- tests/default_lifetime_timeout_test.go | 20 +- ...nerated_uids_prefix_is_node_prefix_test.go | 22 +- tests/helper/fish.go | 50 +-- tests/helper/t_mock.go | 6 +- tests/label_lifetime_timeout_test.go | 20 +- ...overrides_default_lifetime_timeout_test.go | 20 +- tests/maintenance_mode_test.go | 14 +- tests/multiple_driver_instances_test.go | 44 +-- tests/node_filter_test.go | 32 +- tests/shutdown_mode_test.go | 38 +- tests/simple_app_create_destroy_test.go | 14 +- 
...three_apps_with_limit_fish_restart_test.go | 62 +-- tests/three_apps_with_limit_test.go | 44 +-- tests/two_apps_with_limit_test.go | 32 +- 77 files changed, 2034 insertions(+), 2034 deletions(-) diff --git a/cmd/fish/fish.go b/cmd/fish/fish.go index c22b87f..b1a65bb 100644 --- a/cmd/fish/fish.go +++ b/cmd/fish/fish.go @@ -39,26 +39,26 @@ import ( func main() { log.Infof("Aquarium Fish %s (%s)", build.Version, build.Time) - var api_address string - var proxy_socks_address string - var proxy_ssh_address string - var node_address string - var cfg_path string + var apiAddress string + var proxySocksAddress string + var proxySshAddress string + var nodeAddress string + var cfgPath string var dir string - var cpu_limit string - var mem_target string - var log_verbosity string - var log_timestamp bool + var cpuLimit string + var memTarget string + var logVerbosity string + var logTimestamp bool cmd := &cobra.Command{ Use: "aquarium-fish", Short: "Aquarium fish", Long: `Part of the Aquarium suite - a distributed resources manager`, PersistentPreRunE: func(cmd *cobra.Command, args []string) (err error) { - if err = log.SetVerbosity(log_verbosity); err != nil { + if err = log.SetVerbosity(logVerbosity); err != nil { return err } - log.UseTimestamp = log_timestamp + log.UseTimestamp = logTimestamp return log.InitLoggers() }, @@ -66,33 +66,33 @@ func main() { log.Info("Fish init...") cfg := &fish.Config{} - if err = cfg.ReadConfigFile(cfg_path); err != nil { - return log.Error("Fish: Unable to apply config file:", cfg_path, err) + if err = cfg.ReadConfigFile(cfgPath); err != nil { + return log.Error("Fish: Unable to apply config file:", cfgPath, err) } - if api_address != "" { - cfg.APIAddress = api_address + if apiAddress != "" { + cfg.APIAddress = apiAddress } - if proxy_socks_address != "" { - cfg.ProxySocksAddress = proxy_socks_address + if proxySocksAddress != "" { + cfg.ProxySocksAddress = proxySocksAddress } - if proxy_ssh_address != "" { - cfg.ProxySshAddress = 
proxy_ssh_address + if proxySshAddress != "" { + cfg.ProxySshAddress = proxySshAddress } - if node_address != "" { - cfg.NodeAddress = node_address + if nodeAddress != "" { + cfg.NodeAddress = nodeAddress } if dir != "" { cfg.Directory = dir } - if cpu_limit != "" { - val, err := strconv.ParseUint(cpu_limit, 10, 16) + if cpuLimit != "" { + val, err := strconv.ParseUint(cpuLimit, 10, 16) if err != nil { return log.Errorf("Fish: Unable to parse cpu limit value: %v", err) } cfg.CpuLimit = uint16(val) } - if mem_target != "" { - if cfg.MemTarget, err = util.NewHumanSize(mem_target); err != nil { + if memTarget != "" { + if cfg.MemTarget, err = util.NewHumanSize(memTarget); err != nil { return log.Errorf("Fish: Unable to parse mem target value: %v", err) } } @@ -113,19 +113,19 @@ func main() { } log.Info("Fish init TLS...") - ca_path := cfg.TLSCaCrt - if !filepath.IsAbs(ca_path) { - ca_path = filepath.Join(cfg.Directory, ca_path) + caPath := cfg.TLSCaCrt + if !filepath.IsAbs(caPath) { + caPath = filepath.Join(cfg.Directory, caPath) } - key_path := cfg.TLSKey - if !filepath.IsAbs(key_path) { - key_path = filepath.Join(cfg.Directory, key_path) + keyPath := cfg.TLSKey + if !filepath.IsAbs(keyPath) { + keyPath = filepath.Join(cfg.Directory, keyPath) } - cert_path := cfg.TLSCrt - if !filepath.IsAbs(cert_path) { - cert_path = filepath.Join(cfg.Directory, cert_path) + certPath := cfg.TLSCrt + if !filepath.IsAbs(certPath) { + certPath = filepath.Join(cfg.Directory, certPath) } - if err = crypt.InitTlsPairCa([]string{cfg.NodeName, cfg.NodeAddress}, ca_path, key_path, cert_path); err != nil { + if err = crypt.InitTlsPairCa([]string{cfg.NodeName, cfg.NodeAddress}, caPath, keyPath, certPath); err != nil { return err } @@ -143,9 +143,9 @@ func main() { } // Set one connection and WAL mode to handle "database is locked" errors - sql_db, _ := db.DB() - sql_db.SetMaxOpenConns(1) - sql_db.Exec("PRAGMA journal_mode=WAL;") + sqlDb, _ := db.DB() + sqlDb.SetMaxOpenConns(1) + 
sqlDb.Exec("PRAGMA journal_mode=WAL;") log.Info("Fish starting node...") fish, err := fish.New(db, cfg) @@ -160,17 +160,17 @@ func main() { } log.Info("Fish starting ssh proxy...") - id_rsa_path := cfg.NodeSSHKey - if !filepath.IsAbs(id_rsa_path) { - id_rsa_path = filepath.Join(cfg.Directory, id_rsa_path) + idRsaPath := cfg.NodeSSHKey + if !filepath.IsAbs(idRsaPath) { + idRsaPath = filepath.Join(cfg.Directory, idRsaPath) } - err = proxy_ssh.Init(fish, id_rsa_path, cfg.ProxySshAddress) + err = proxy_ssh.Init(fish, idRsaPath, cfg.ProxySshAddress) if err != nil { return err } log.Info("Fish starting API...") - srv, err := openapi.Init(fish, cfg.APIAddress, ca_path, cert_path, key_path) + srv, err := openapi.Init(fish, cfg.APIAddress, caPath, certPath, keyPath) if err != nil { return err } @@ -197,16 +197,16 @@ func main() { } flags := cmd.Flags() - flags.StringVarP(&api_address, "api", "a", "", "address used to expose the fish API") - flags.StringVar(&proxy_socks_address, "socks_proxy", "", "address used to expose the SOCKS5 proxy") - flags.StringVar(&proxy_ssh_address, "ssh_proxy", "", "address used to expose the SSH proxy") - flags.StringVarP(&node_address, "node", "n", "", "node external endpoint to connect to tell the other nodes") - flags.StringVarP(&cfg_path, "cfg", "c", "", "yaml configuration file") + flags.StringVarP(&apiAddress, "api", "a", "", "address used to expose the fish API") + flags.StringVar(&proxySocksAddress, "socks_proxy", "", "address used to expose the SOCKS5 proxy") + flags.StringVar(&proxySshAddress, "ssh_proxy", "", "address used to expose the SSH proxy") + flags.StringVarP(&nodeAddress, "node", "n", "", "node external endpoint to connect to tell the other nodes") + flags.StringVarP(&cfgPath, "cfg", "c", "", "yaml configuration file") flags.StringVarP(&dir, "dir", "D", "", "database and other fish files directory") - flags.StringVar(&cpu_limit, "cpu", "", "max amount of threads fish node will be able to utilize, default - no limit") - 
flags.StringVar(&mem_target, "mem", "", "target memory utilization for fish node to run GC more aggressively when too close") - flags.StringVarP(&log_verbosity, "verbosity", "v", "info", "log level (debug, info, warn, error)") - flags.BoolVar(&log_timestamp, "timestamp", true, "prepend timestamps for each log line") + flags.StringVar(&cpuLimit, "cpu", "", "max amount of threads fish node will be able to utilize, default - no limit") + flags.StringVar(&memTarget, "mem", "", "target memory utilization for fish node to run GC more aggressively when too close") + flags.StringVarP(&logVerbosity, "verbosity", "v", "info", "log level (debug, info, warn, error)") + flags.BoolVar(&logTimestamp, "timestamp", true, "prepend timestamps for each log line") flags.Lookup("timestamp").NoOptDefVal = "false" if err := cmd.Execute(); err != nil { diff --git a/lib/crypt/crypt.go b/lib/crypt/crypt.go index 86cd343..d8bb6ac 100644 --- a/lib/crypt/crypt.go +++ b/lib/crypt/crypt.go @@ -23,20 +23,20 @@ import ( ) const ( - Argon2_Algo = "Argon2id" + Argon2Algo = "Argon2id" // Default tuned to process at least 20 API requests/sec on 2CPU - Argon2_Memory = 64 * 1024 // 64MB - Argon2_Iterations = 1 - Argon2_Threads = 8 // Optimal to quickly execute one request, with not much overhead - Argon2_SaltLen = 8 - Argon2_HashLen = 32 + Argon2Memory = 64 * 1024 // 64MB + Argon2Iterations = 1 + Argon2Threads = 8 // Optimal to quickly execute one request, with not much overhead + Argon2Saltlen = 8 + Argon2Hashlen = 32 // <= v0.7.4 hash params for backward-compatibility // could easily choke the API system and cause OOMs so not recommended to use them - v074_Argon2_Algo = "Argon2" - v074_Argon2_Memory = 524288 - v074_Argon2_Iterations = 1 - v074_Argon2_Threads = 1 + v074Argon2Algo = "Argon2" + v074Argon2Memory = 524288 + v074Argon2Iterations = 1 + v074Argon2Threads = 1 RandStringCharsetB58 = "abcdefghijkmnopqrstuvwxyz" + "ABCDEFGHJKLMNPQRSTUVWXYZ123456789" // Base58 @@ -74,42 +74,42 @@ func 
RandString(size int) string { // Create random string of specified size func RandStringCharset(size int, charset string) string { data := make([]byte, size) - charset_len := big.NewInt(int64(len(charset))) + charsetLen := big.NewInt(int64(len(charset))) for i := range data { - charset_pos, err := rand.Int(rand.Reader, charset_len) + charsetPos, err := rand.Int(rand.Reader, charsetLen) if err != nil { log.Error("Crypt: Failed to generate random string:", err) } - data[i] = charset[charset_pos.Int64()] + data[i] = charset[charsetPos.Int64()] } return string(data) } // Generate a salted hash for the input string with default parameters func NewHash(input string, salt []byte) (h Hash) { - h.Algo = Argon2_Algo + h.Algo = Argon2Algo if salt != nil { h.Salt = salt } else { - h.Salt = RandBytes(Argon2_SaltLen) + h.Salt = RandBytes(Argon2Saltlen) } - h.Prop.Iterations = Argon2_Iterations - h.Prop.Memory = Argon2_Memory - h.Prop.Threads = Argon2_Threads + h.Prop.Iterations = Argon2Iterations + h.Prop.Memory = Argon2Memory + h.Prop.Threads = Argon2Threads // Create hash data - h.Hash = argon2.IDKey([]byte(input), h.Salt, h.Prop.Iterations, h.Prop.Memory, h.Prop.Threads, Argon2_HashLen) + h.Hash = argon2.IDKey([]byte(input), h.Salt, h.Prop.Iterations, h.Prop.Memory, h.Prop.Threads, Argon2Hashlen) return } // Check the input equal to the current hashed one func (h *Hash) IsEqual(input string) bool { - if h.Algo == v074_Argon2_Algo { + if h.Algo == v074Argon2Algo { // Legacy low-performant parameters, not defined in hash - h.Prop.Iterations = v074_Argon2_Iterations - h.Prop.Memory = v074_Argon2_Memory - h.Prop.Threads = v074_Argon2_Threads + h.Prop.Iterations = v074Argon2Iterations + h.Prop.Memory = v074Argon2Memory + h.Prop.Threads = v074Argon2Threads } return bytes.Equal(h.Hash, argon2.IDKey([]byte(input), h.Salt, h.Prop.Iterations, h.Prop.Memory, h.Prop.Threads, uint32(len(h.Hash)))) diff --git a/lib/crypt/init_tls_pair_ca.go b/lib/crypt/init_tls_pair_ca.go index 
37a653b..da34da3 100644 --- a/lib/crypt/init_tls_pair_ca.go +++ b/lib/crypt/init_tls_pair_ca.go @@ -29,21 +29,21 @@ import ( "time" ) -func InitTlsPairCa(hosts []string, ca_path, key_path, crt_path string) error { +func InitTlsPairCa(hosts []string, caPath, keyPath, crtPath string) error { // Generates simple CA and Node certificate signed by the CA - _, ca_err := os.Stat(ca_path) - if os.IsNotExist(ca_err) { + _, caErr := os.Stat(caPath) + if os.IsNotExist(caErr) { // Generate new CA since it's not exist - if err := generateSimpleCa(getCaKeyFromCertPath(ca_path), ca_path); err != nil { + if err := generateSimpleCa(getCaKeyFromCertPath(caPath), caPath); err != nil { return err } } - _, key_err := os.Stat(key_path) - _, crt_err := os.Stat(crt_path) - if os.IsNotExist(key_err) || os.IsNotExist(crt_err) { + _, keyErr := os.Stat(keyPath) + _, crtErr := os.Stat(crtPath) + if os.IsNotExist(keyErr) || os.IsNotExist(crtErr) { // Generate fish key & cert - if err := generateSimpleKeyCert(hosts, key_path, crt_path, ca_path); err != nil { + if err := generateSimpleKeyCert(hosts, keyPath, crtPath, caPath); err != nil { return err } } @@ -51,40 +51,40 @@ func InitTlsPairCa(hosts []string, ca_path, key_path, crt_path string) error { return nil } -func getCaKeyFromCertPath(ca_path string) string { +func getCaKeyFromCertPath(caPath string) string { // Just trim the name extension and add ".key" - filename := filepath.Base(ca_path) + filename := filepath.Base(caPath) n := strings.LastIndexByte(filename, '.') if n == -1 { - return ca_path + return caPath } - return filepath.Join(filepath.Dir(ca_path), filename[:n]+".key") + return filepath.Join(filepath.Dir(caPath), filename[:n]+".key") } -func generateSimpleCa(key_path, crt_path string) error { +func generateSimpleCa(keyPath, crtPath string) error { priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) if err != nil { return err } - not_before := time.Now() + notBefore := time.Now() - serial_number_limit := 
new(big.Int).Lsh(big.NewInt(1), 128) - serial_number, err := rand.Int(rand.Reader, serial_number_limit) + serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128) + serialNumber, err := rand.Int(rand.Reader, serialNumberLimit) if err != nil { return err } template := x509.Certificate{ - SerialNumber: serial_number, + SerialNumber: serialNumber, Subject: pkix.Name{ // It's just an example CA - for prod generate CA & certs yourself with openssl Organization: []string{"Example Co CA"}, CommonName: "ClusterCA", }, - NotBefore: not_before, - NotAfter: not_before.AddDate(10, 0, 0), // 10y + NotBefore: notBefore, + NotAfter: notBefore.AddDate(10, 0, 0), // 10y IsCA: true, KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, @@ -93,27 +93,27 @@ func generateSimpleCa(key_path, crt_path string) error { } // Generate certificate - if err := createCert(crt_path, &priv.PublicKey, priv, &template, &template); err != nil { + if err := createCert(crtPath, &priv.PublicKey, priv, &template, &template); err != nil { return err } // Create private key file - if err := createKey(key_path, priv); err != nil { + if err := createKey(keyPath, priv); err != nil { return err } return nil } -func generateSimpleKeyCert(hosts []string, key_path, crt_path, ca_path string) error { +func generateSimpleKeyCert(hosts []string, keyPath, crtPath, caPath string) error { // Load the CA key and cert - ca_tls, err := tls.LoadX509KeyPair(ca_path, getCaKeyFromCertPath(ca_path)) + caTls, err := tls.LoadX509KeyPair(caPath, getCaKeyFromCertPath(caPath)) if err != nil { return err } - ca_key := ca_tls.PrivateKey + caKey := caTls.PrivateKey - ca_crt, err := x509.ParseCertificate(ca_tls.Certificate[0]) + caCrt, err := x509.ParseCertificate(caTls.Certificate[0]) if err != nil { return err } @@ -124,23 +124,23 @@ func generateSimpleKeyCert(hosts []string, key_path, crt_path, ca_path string) e return err } - not_before := time.Now() + notBefore := time.Now() - serial_number_limit := 
new(big.Int).Lsh(big.NewInt(1), 128) - serial_number, err := rand.Int(rand.Reader, serial_number_limit) + serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128) + serialNumber, err := rand.Int(rand.Reader, serialNumberLimit) if err != nil { return err } template := x509.Certificate{ - SerialNumber: serial_number, + SerialNumber: serialNumber, Subject: pkix.Name{ Organization: []string{"Example Co Crt"}, CommonName: hosts[0], // Node Name is first in hosts list }, - NotBefore: not_before, - NotAfter: not_before.AddDate(1, 0, 0), // 1y + NotBefore: notBefore, + NotAfter: notBefore.AddDate(1, 0, 0), // 1y // Overall for server & client auth KeyUsage: x509.KeyUsageDigitalSignature, @@ -160,38 +160,38 @@ func generateSimpleKeyCert(hosts []string, key_path, crt_path, ca_path string) e } // Generate certificate - if err := createCert(crt_path, &priv.PublicKey, ca_key, &template, ca_crt); err != nil { + if err := createCert(crtPath, &priv.PublicKey, caKey, &template, caCrt); err != nil { return err } // Create private key file - if err := createKey(key_path, priv); err != nil { + if err := createKey(keyPath, priv); err != nil { return err } return nil } -func createCert(crt_path string, pubkey crypto.PublicKey, ca_key crypto.PrivateKey, cert, ca_crt *x509.Certificate) error { +func createCert(crtPath string, pubkey crypto.PublicKey, caKey crypto.PrivateKey, cert, caCrt *x509.Certificate) error { // Generate certificate - der_bytes, err := x509.CreateCertificate(rand.Reader, cert, ca_crt, pubkey, ca_key) + derBytes, err := x509.CreateCertificate(rand.Reader, cert, caCrt, pubkey, caKey) if err != nil { return err } // Create certificate file - crt_out, err := os.Create(crt_path) + crtOut, err := os.Create(crtPath) if err != nil { return err } - defer crt_out.Close() - if err := pem.Encode(crt_out, &pem.Block{Type: "CERTIFICATE", Bytes: der_bytes}); err != nil { + defer crtOut.Close() + if err := pem.Encode(crtOut, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes}); err != 
nil { return err } // Attach CA certificate to generate complete chain if it's different from the cert data - if cert != ca_crt { - if err := pem.Encode(crt_out, &pem.Block{Type: "CA CERTIFICATE", Bytes: ca_crt.Raw}); err != nil { + if cert != caCrt { + if err := pem.Encode(crtOut, &pem.Block{Type: "CA CERTIFICATE", Bytes: caCrt.Raw}); err != nil { return err } } @@ -199,18 +199,18 @@ func createCert(crt_path string, pubkey crypto.PublicKey, ca_key crypto.PrivateK return nil } -func createKey(key_path string, key crypto.PrivateKey) error { +func createKey(keyPath string, key crypto.PrivateKey) error { // Create private key file - key_out, err := os.OpenFile(key_path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) + keyOut, err := os.OpenFile(keyPath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) if err != nil { return err } - defer key_out.Close() - priv_bytes, err := x509.MarshalPKCS8PrivateKey(key) + defer keyOut.Close() + privBytes, err := x509.MarshalPKCS8PrivateKey(key) if err != nil { return err } - if err := pem.Encode(key_out, &pem.Block{Type: "PRIVATE KEY", Bytes: priv_bytes}); err != nil { + if err := pem.Encode(keyOut, &pem.Block{Type: "PRIVATE KEY", Bytes: privBytes}); err != nil { return err } diff --git a/lib/drivers/aws/dedicated_pool.go b/lib/drivers/aws/dedicated_pool.go index 9762391..92d6175 100644 --- a/lib/drivers/aws/dedicated_pool.go +++ b/lib/drivers/aws/dedicated_pool.go @@ -22,13 +22,13 @@ import ( "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/ec2" - ec2_types "github.com/aws/aws-sdk-go-v2/service/ec2/types" + ec2types "github.com/aws/aws-sdk-go-v2/service/ec2/types" "github.com/adobe/aquarium-fish/lib/log" ) // Custom status to set in the host for simplifying parallel ops in between the updates -const HOST_RESERVED = "reserved" +const HostReserved = "reserved" // TODO: Right now logic pinned to just one node, need to be distributed @@ -39,15 +39,15 @@ type dedicatedPoolWorker struct { record DedicatedPoolRecord // Amount 
of instances per dedicated host used in capacity calculations - instances_per_host uint + instancesPerHost uint // It's better to update active_hosts by calling updateDedicatedHosts() - active_hosts map[string]ec2_types.Host - active_hosts_updated time.Time - active_hosts_mu sync.RWMutex + active_hosts map[string]ec2types.Host + activeHostsUpdated time.Time + activeHostsMu sync.RWMutex // Hosts to release or scrub at specified time, used by manageHosts process - to_manage_at map[string]time.Time + toManageAt map[string]time.Time } // Function runs as routine and makes sure identified hosts pool fits the configuration @@ -57,8 +57,8 @@ func (d *Driver) newDedicatedPoolWorker(name string, record DedicatedPoolRecord) driver: d, record: record, - active_hosts: make(map[string]ec2_types.Host), - to_manage_at: make(map[string]time.Time), + active_hosts: make(map[string]ec2types.Host), + toManageAt: make(map[string]time.Time), } // Receiving amount of instances per dedicated host @@ -71,80 +71,80 @@ func (d *Driver) newDedicatedPoolWorker(name string, record DedicatedPoolRecord) return worker } -func (w *dedicatedPoolWorker) AvailableCapacity(instance_type string) int64 { +func (w *dedicatedPoolWorker) AvailableCapacity(instanceType string) int64 { // Check if instance type fits the pool type - if instance_type != w.record.Type { - log.Warnf("AWS: dedicated %q: Incorrect pool type requested: %s", w.name, instance_type) + if instanceType != w.record.Type { + log.Warnf("AWS: dedicated %q: Incorrect pool type requested: %s", w.name, instanceType) return -1 } - var inst_count int64 + var instCount int64 if err := w.updateDedicatedHosts(); err != nil { - w.active_hosts_mu.RLock() - log.Warnf("AWS: dedicated %q: Unable to update dedicated hosts list, continue with %q: %v", w.active_hosts_updated, err) - w.active_hosts_mu.RUnlock() + w.activeHostsMu.RLock() + log.Warnf("AWS: dedicated %q: Unable to update dedicated hosts list, continue with %q: %v", w.activeHostsUpdated, err) + 
w.activeHostsMu.RUnlock() } // Looking for the available hosts in the list and their capacity - w.active_hosts_mu.RLock() - defer w.active_hosts_mu.RUnlock() + w.activeHostsMu.RLock() + defer w.activeHostsMu.RUnlock() for _, host := range w.active_hosts { // For now support only single-type dedicated hosts, because primary goal is mac machines - inst_count += int64(getHostCapacity(&host)) + instCount += int64(getHostCapacity(&host)) } // Let's add the amount of instances we can allocate - inst_count += (int64(w.record.Max) - int64(len(w.active_hosts))) * int64(w.instances_per_host) + instCount += (int64(w.record.Max) - int64(len(w.active_hosts))) * int64(w.instancesPerHost) - log.Debugf("AWS: dedicated %q: AvailableCapacity for dedicated host type %q: %d", w.name, w.record.Type, inst_count) + log.Debugf("AWS: dedicated %q: AvailableCapacity for dedicated host type %q: %d", w.name, w.record.Type, instCount) - return inst_count + return instCount } // Internally reserves the existing dedicated host if possible till the next list update -func (w *dedicatedPoolWorker) ReserveHost(instance_type string) string { - if instance_type != w.record.Type { - log.Warnf("AWS: dedicated %q: Incorrect pool type requested: %s", w.name, instance_type) +func (w *dedicatedPoolWorker) ReserveHost(instanceType string) string { + if instanceType != w.record.Type { + log.Warnf("AWS: dedicated %q: Incorrect pool type requested: %s", w.name, instanceType) return "" } // Using write lock here because it modifies the list of hosts in the end - w.active_hosts_mu.Lock() - defer w.active_hosts_mu.Unlock() + w.activeHostsMu.Lock() + defer w.activeHostsMu.Unlock() - var available_hosts []string + var availableHosts []string // Look for the hosts with capacity - for host_id, host := range w.active_hosts { + for hostId, host := range w.active_hosts { if getHostCapacity(&host) > 0 { - available_hosts = append(available_hosts, host_id) + availableHosts = append(availableHosts, hostId) } } - if 
len(available_hosts) < 1 { + if len(availableHosts) < 1 { log.Infof("AWS: dedicated %q: No available hosts found in the current active list", w.name) return "" } // Pick random one from the list of available hosts to reduce the possibility of conflict - host := w.active_hosts[available_hosts[rand.Intn(len(available_hosts))]] // #nosec G404 + host := w.active_hosts[availableHosts[rand.Intn(len(availableHosts))]] // #nosec G404 // Mark it as reserved temporary to ease multi-allocation at the same time - host.State = HOST_RESERVED + host.State = HostReserved w.active_hosts[aws.ToString(host.HostId)] = host return aws.ToString(host.HostId) } // Allocates the new dedicated host if possible -func (w *dedicatedPoolWorker) AllocateHost(instance_type string) string { - if instance_type != w.record.Type { - log.Warnf("AWS: dedicated %q: Incorrect pool type requested: %s", w.name, instance_type) +func (w *dedicatedPoolWorker) AllocateHost(instanceType string) string { + if instanceType != w.record.Type { + log.Warnf("AWS: dedicated %q: Incorrect pool type requested: %s", w.name, instanceType) return "" } - curr_active_hosts := len(w.active_hosts) - if w.record.Max <= uint(curr_active_hosts) { - log.Warnf("AWS: dedicated %q: Unable to request new host due to reached the maximum limit: %d <= %d", w.name, w.record.Max, curr_active_hosts) + currActiveHosts := len(w.active_hosts) + if w.record.Max <= uint(currActiveHosts) { + log.Warnf("AWS: dedicated %q: Unable to request new host due to reached the maximum limit: %d <= %d", w.name, w.record.Max, currActiveHosts) return "" } @@ -158,50 +158,50 @@ func (w *dedicatedPoolWorker) AllocateHost(instance_type string) string { } // Will reserve existing or allocate the new host -func (w *dedicatedPoolWorker) ReserveAllocateHost(instance_type string) string { - if instance_type != w.record.Type { - log.Warnf("AWS: dedicated %q: Incorrect pool type requested: %s", w.name, instance_type) +func (w *dedicatedPoolWorker) 
ReserveAllocateHost(instanceType string) string { + if instanceType != w.record.Type { + log.Warnf("AWS: dedicated %q: Incorrect pool type requested: %s", w.name, instanceType) return "" } - out := w.ReserveHost(instance_type) + out := w.ReserveHost(instanceType) if out != "" { return out } - return w.AllocateHost(instance_type) + return w.AllocateHost(instanceType) } func (w *dedicatedPoolWorker) fetchInstancesPerHost() { if strings.HasSuffix(w.record.Type, ".metal") { // We don't need to continue because metal == metal and means 1:1 capacity - w.instances_per_host = 1 + w.instancesPerHost = 1 return } // Getting types to find dedicated host capacity // Adding the same type but with .metal on the end - dot_pos := strings.Index(w.record.Type, ".") - if dot_pos == -1 { - dot_pos = len(w.record.Type) + dotPos := strings.Index(w.record.Type, ".") + if dotPos == -1 { + dotPos = len(w.record.Type) } - host_type := w.record.Type[0:dot_pos] + ".metal" - types := []string{w.record.Type, host_type} + hostType := w.record.Type[0:dotPos] + ".metal" + types := []string{w.record.Type, hostType} // We will not end until this works as expected. Not great in case user messed up with config, // but at least it will not leave the AWS driver not operational. 
conn := w.driver.newEC2Conn() for { - inst_types, err := w.driver.getTypes(conn, types) + instTypes, err := w.driver.getTypes(conn, types) if err != nil { log.Errorf("AWS: dedicated %q: Unable to get types %q (will retry): %v", w.name, types, err) time.Sleep(10 * time.Second) continue } - inst_vcpus := aws.ToInt32(inst_types[w.record.Type].VCpuInfo.DefaultVCpus) - host_vcpus := aws.ToInt32(inst_types[host_type].VCpuInfo.DefaultVCpus) - w.instances_per_host = uint(host_vcpus / inst_vcpus) - log.Debugf("AWS: dedicated %q: Fetched amount of instances per host: %d", w.name, w.instances_per_host) + instVcpus := aws.ToInt32(instTypes[w.record.Type].VCpuInfo.DefaultVCpus) + hostVcpus := aws.ToInt32(instTypes[hostType].VCpuInfo.DefaultVCpus) + w.instancesPerHost = uint(hostVcpus / instVcpus) + log.Debugf("AWS: dedicated %q: Fetched amount of instances per host: %d", w.name, w.instancesPerHost) return } } @@ -225,38 +225,38 @@ func (w *dedicatedPoolWorker) backgroundProcess() { // Runs periodically to keep the hosts pool busy and cheap // Will return the list of hosts to release or exetute a scrubbing process for macs func (w *dedicatedPoolWorker) manageHosts() []string { - w.active_hosts_mu.RLock() - defer w.active_hosts_mu.RUnlock() + w.activeHostsMu.RLock() + defer w.activeHostsMu.RUnlock() // List of hosts to clean from w.to_manage_at list - var to_clean []string - var to_release []string + var toClean []string + var toRelease []string // Going through the process list - for host_id, timeout := range w.to_manage_at { - if host, ok := w.active_hosts[host_id]; !ok || isHostUsed(&host) { + for hostId, timeout := range w.toManageAt { + if host, ok := w.active_hosts[hostId]; !ok || isHostUsed(&host) { // The host is disappeared or used, we don't need to manage it out anymore - to_clean = append(to_clean, host_id) + toClean = append(toClean, hostId) continue } // Host seems still exists and not used - check for timeout if timeout.Before(time.Now()) { // Timeout for the host 
reached - let's put it in the release bucket - to_release = append(to_release, host_id) + toRelease = append(toRelease, hostId) } } // Cleaning up the manage list - for _, host_id := range to_clean { - delete(w.to_manage_at, host_id) + for _, hostId := range toClean { + delete(w.toManageAt, hostId) } // Going through the active hosts and updating to_manage list - for host_id, host := range w.active_hosts { - if host.State == ec2_types.AllocationStatePermanentFailure { + for hostId, host := range w.active_hosts { + if host.State == ec2types.AllocationStatePermanentFailure { // Immediately release - we don't need failed hosts in our pool - to_release = append(to_release, host_id) + toRelease = append(toRelease, hostId) } // We don't need to manage out the hosts in use @@ -265,14 +265,14 @@ func (w *dedicatedPoolWorker) manageHosts() []string { } // If it's mac not too old and in scrubbing process (pending) - we don't need to bother - if host.State == ec2_types.AllocationStatePending && isHostMac(&host) && !isMacTooOld(&host) { + if host.State == ec2types.AllocationStatePending && isHostMac(&host) && !isMacTooOld(&host) { continue } // Skipping the hosts that already in managed list found := false - for hid := range w.to_manage_at { - if host_id == hid { + for hid := range w.toManageAt { + if hostId == hid { found = true break } @@ -284,37 +284,37 @@ func (w *dedicatedPoolWorker) manageHosts() []string { // Check if mac - giving it some time before action release or scrubbing // If not mac or mac is old: giving a chance to be reused - will be processed next cycle if isHostMac(&host) && !isMacTooOld(&host) { - w.to_manage_at[host_id] = time.Now().Add(time.Duration(w.record.ScrubbingDelay)) + w.toManageAt[hostId] = time.Now().Add(time.Duration(w.record.ScrubbingDelay)) } else { - w.to_manage_at[host_id] = time.Now() + w.toManageAt[hostId] = time.Now() } - log.Debugf("AWS: dedicated %q: Added new host to be managed out: %q at %q", w.name, host_id, 
w.to_manage_at[host_id]) + log.Debugf("AWS: dedicated %q: Added new host to be managed out: %q at %q", w.name, hostId, w.toManageAt[hostId]) } - return to_release + return toRelease } -func (w *dedicatedPoolWorker) releaseHosts(release_hosts []string) { - if len(release_hosts) < 1 { +func (w *dedicatedPoolWorker) releaseHosts(releaseHosts []string) { + if len(releaseHosts) < 1 { // Skipping since there is nothing to do return } - log.Debugf("AWS: dedicated %q: Dealing with hosts to release: %v", w.name, release_hosts) + log.Debugf("AWS: dedicated %q: Dealing with hosts to release: %v", w.name, releaseHosts) // Function removes the items from the active hosts map to optimize the processes - w.active_hosts_mu.Lock() - defer w.active_hosts_mu.Unlock() + w.activeHostsMu.Lock() + defer w.activeHostsMu.Unlock() // Check if there are macs which need a special treatment - var mac_hosts []string - var to_release []string - for _, host_id := range release_hosts { + var macHosts []string + var toRelease []string + for _, hostId := range releaseHosts { // Special treatment for mac hosts - it makes not much sense to try to release them until // they've live for 24h due to Apple-AWS license. 
- if host, ok := w.active_hosts[host_id]; ok && host.HostProperties != nil { + if host, ok := w.active_hosts[hostId]; ok && host.HostProperties != nil { if isHostMac(&host) { - mac_hosts = append(mac_hosts, host_id) + macHosts = append(macHosts, hostId) // If mac host not reached 24h since allocation - skipping addition to the release list if !isHostReadyForRelease(&host) { continue @@ -322,63 +322,63 @@ func (w *dedicatedPoolWorker) releaseHosts(release_hosts []string) { } } // Adding any host to to_release list - to_release = append(to_release, host_id) + toRelease = append(toRelease, hostId) } // Run the release process for multiple hosts - release_failed, err := w.releaseDedicatedHosts(to_release) + releaseFailed, err := w.releaseDedicatedHosts(toRelease) if err != nil { - log.Errorf("AWS: dedicated %q: Unable to send request for release of the hosts %v: %v", w.name, to_release, err) + log.Errorf("AWS: dedicated %q: Unable to send request for release of the hosts %v: %v", w.name, toRelease, err) // Not fatal, because we still need to deal with mac hosts } // Cleanup the released hosts from the active hosts list - for _, host_id := range to_release { + for _, hostId := range toRelease { // Skipping if release of the host failed for some reason - for _, failed_host_id := range release_failed { - if failed_host_id == host_id { + for _, failedHostId := range releaseFailed { + if failedHostId == hostId { continue } } - delete(w.active_hosts, host_id) + delete(w.active_hosts, hostId) } // Scrubbing the rest of mac hosts - if len(mac_hosts) > 0 && w.record.ScrubbingDelay != 0 { - for _, host_id := range mac_hosts { - host, ok := w.active_hosts[host_id] - if !ok || host.State == ec2_types.AllocationStatePending { + if len(macHosts) > 0 && w.record.ScrubbingDelay != 0 { + for _, hostId := range macHosts { + host, ok := w.active_hosts[hostId] + if !ok || host.State == ec2types.AllocationStatePending { // The host was released or already in scrubbing - skipping it 
continue } // Reserve the host internally for scrubbing process to prevent allocation issues - host.State = HOST_RESERVED + host.State = HostReserved w.active_hosts[aws.ToString(host.HostId)] = host // Triggering the scrubbing process - if err := w.driver.triggerHostScrubbing(host_id, aws.ToString(host.HostProperties.InstanceType)); err != nil { - log.Errorf("AWS: dedicated %q: Unable to run scrubbing for host %q: %v", w.name, host_id, err) + if err := w.driver.triggerHostScrubbing(hostId, aws.ToString(host.HostProperties.InstanceType)); err != nil { + log.Errorf("AWS: dedicated %q: Unable to run scrubbing for host %q: %v", w.name, hostId, err) continue } // Removing the host from the list - delete(w.active_hosts, host_id) + delete(w.active_hosts, hostId) } } } -func isHostMac(host *ec2_types.Host) bool { +func isHostMac(host *ec2types.Host) bool { return host.HostProperties != nil && awsInstTypeAny(aws.ToString(host.HostProperties.InstanceType), "mac") } -func isMacTooOld(host *ec2_types.Host) bool { +func isMacTooOld(host *ec2types.Host) bool { return aws.ToTime(host.AllocationTime).Before(time.Now().Add(-24 * time.Hour)) } // Check if the host is ready to be released - if it's mac then it should be older then 24h -func isHostReadyForRelease(host *ec2_types.Host) bool { +func isHostReadyForRelease(host *ec2types.Host) bool { // Host not used - for sure ready for release if !isHostUsed(host) { // If mac is not old enough - it's not ready for release @@ -389,7 +389,7 @@ func isHostReadyForRelease(host *ec2_types.Host) bool { } // Mac in scrubbing process (pending) can be released but should be older then 24h - if host.State == ec2_types.AllocationStatePending && isHostMac(host) && isMacTooOld(host) { + if host.State == ec2types.AllocationStatePending && isHostMac(host) && isMacTooOld(host) { return true } @@ -397,16 +397,16 @@ func isHostReadyForRelease(host *ec2_types.Host) bool { } // Check if the host is used -func isHostUsed(host *ec2_types.Host) bool { - if 
host.State == HOST_RESERVED || len(host.Instances) > 0 { +func isHostUsed(host *ec2types.Host) bool { + if host.State == HostReserved || len(host.Instances) > 0 { return true } return false } // Check how much capacity we have on a host -func getHostCapacity(host *ec2_types.Host) uint { - if host.State != ec2_types.AllocationStateAvailable || host.AvailableCapacity == nil { +func getHostCapacity(host *ec2types.Host) uint { + if host.State != ec2types.AllocationStateAvailable || host.AvailableCapacity == nil { return 0 } // TODO: For now supports only single-type dedicated hosts @@ -414,27 +414,27 @@ func getHostCapacity(host *ec2_types.Host) uint { } // Updates the hosts list every 5 minutes -func (w *dedicatedPoolWorker) updateDedicatedHostsProcess() ([]ec2_types.Host, error) { +func (w *dedicatedPoolWorker) updateDedicatedHostsProcess() ([]ec2types.Host, error) { defer log.Infof("AWS: dedicated %q: Exited updateDedicatedHostsProcess()", w.name) // Balancing the regular update delay based on the scrubbing optimization because it needs to // record the time of host state change and only then the timer to scrubbing will start ticking - update_delay := 5 * time.Minute // 5 min by default - scrubbing_delay := time.Duration(w.record.ScrubbingDelay) - if scrubbing_delay != 0 && scrubbing_delay < 10*time.Minute { - update_delay = scrubbing_delay / 2 + updateDelay := 5 * time.Minute // 5 min by default + scrubbingDelay := time.Duration(w.record.ScrubbingDelay) + if scrubbingDelay != 0 && scrubbingDelay < 10*time.Minute { + updateDelay = scrubbingDelay / 2 } for { time.Sleep(30 * time.Second) // We need to keep the request rate budget, so using a delay between regular updates. 
// If the dedicated hosts are used often, it could wait for a while due to often updates - w.active_hosts_mu.RLock() - last_update := w.active_hosts_updated - w.active_hosts_mu.RUnlock() - if last_update.Before(time.Now().Add(-update_delay)) { + w.activeHostsMu.RLock() + lastUpdate := w.activeHostsUpdated + w.activeHostsMu.RUnlock() + if lastUpdate.Before(time.Now().Add(-updateDelay)) { if err := w.updateDedicatedHosts(); err != nil { - log.Warnf("AWS: dedicated %q: Error happened during the regular hosts update, continue with updated on %q: %v", last_update, err) + log.Warnf("AWS: dedicated %q: Error happened during the regular hosts update, continue with updated on %q: %v", lastUpdate, err) } } } @@ -443,10 +443,10 @@ func (w *dedicatedPoolWorker) updateDedicatedHostsProcess() ([]ec2_types.Host, e // Will list all the allocated dedicated hosts on AWS with desired zone and tag func (w *dedicatedPoolWorker) updateDedicatedHosts() error { // Do not update too often - w.active_hosts_mu.RLock() - ready_for_update := w.active_hosts_updated.Before(time.Now().Add(-10 * time.Second)) - w.active_hosts_mu.RUnlock() - if !ready_for_update { + w.activeHostsMu.RLock() + readyForUpdate := w.activeHostsUpdated.Before(time.Now().Add(-10 * time.Second)) + w.activeHostsMu.RUnlock() + if !readyForUpdate { return nil } @@ -454,15 +454,15 @@ func (w *dedicatedPoolWorker) updateDedicatedHosts() error { conn := w.driver.newEC2Conn() p := ec2.NewDescribeHostsPaginator(conn, &ec2.DescribeHostsInput{ - Filter: []ec2_types.Filter{ + Filter: []ec2types.Filter{ // We don't need released hosts, so skipping them { Name: aws.String("state"), Values: []string{ - string(ec2_types.AllocationStateAvailable), - string(ec2_types.AllocationStateUnderAssessment), - string(ec2_types.AllocationStatePermanentFailure), - string(ec2_types.AllocationStatePending), + string(ec2types.AllocationStateAvailable), + string(ec2types.AllocationStateUnderAssessment), + string(ec2types.AllocationStatePermanentFailure), 
+ string(ec2types.AllocationStatePending), }, }, { @@ -481,7 +481,7 @@ func (w *dedicatedPoolWorker) updateDedicatedHosts() error { }) // Processing the hosts - curr_active_hosts := make(map[string]ec2_types.Host) + currActiveHosts := make(map[string]ec2types.Host) for p.HasMorePages() { resp, err := p.NextPage(context.TODO()) if err != nil { @@ -489,27 +489,27 @@ func (w *dedicatedPoolWorker) updateDedicatedHosts() error { } for _, rh := range resp.Hosts { - host_id := aws.ToString(rh.HostId) - curr_active_hosts[host_id] = rh + hostId := aws.ToString(rh.HostId) + currActiveHosts[hostId] = rh // If the response host has not changed, use the same object in the active list - if ah, ok := w.active_hosts[host_id]; ok && ah.State == rh.State && len(ah.Instances) == len(rh.Instances) { - curr_active_hosts[host_id] = w.active_hosts[host_id] + if ah, ok := w.active_hosts[hostId]; ok && ah.State == rh.State && len(ah.Instances) == len(rh.Instances) { + currActiveHosts[hostId] = w.active_hosts[hostId] } } } // Updating the list of hosts with received data - w.active_hosts_mu.Lock() - defer w.active_hosts_mu.Unlock() + w.activeHostsMu.Lock() + defer w.activeHostsMu.Unlock() - w.active_hosts_updated = time.Now() - w.active_hosts = curr_active_hosts + w.activeHostsUpdated = time.Now() + w.active_hosts = currActiveHosts // Printing list for debug purposes if log.Verbosity == 1 { log.Debugf("AWS: dedicated %q: Amount of active hosts in pool: %d", w.name, len(w.active_hosts)) - for host_id, host := range w.active_hosts { - log.Debugf("AWS: dedicated %q: active_hosts item: host_id:%q, allocated:%q, state:%q, capacity:%d (%d)", w.name, host_id, host.AllocationTime, host.State, getHostCapacity(&host), w.instances_per_host) + for hostId, host := range w.active_hosts { + log.Debugf("AWS: dedicated %q: active_hosts item: host_id:%q, allocated:%q, state:%q, capacity:%d (%d)", w.name, hostId, host.AllocationTime, host.State, getHostCapacity(&host), w.instancesPerHost) } } @@ -523,13 
+523,13 @@ func (w *dedicatedPoolWorker) allocateDedicatedHosts(amount int32) ([]string, er input := &ec2.AllocateHostsInput{ AvailabilityZone: aws.String(w.record.Zone), - AutoPlacement: ec2_types.AutoPlacementOff, // Managed hosts are for targeted workload + AutoPlacement: ec2types.AutoPlacementOff, // Managed hosts are for targeted workload InstanceType: aws.String(w.record.Type), Quantity: aws.Int32(amount), - TagSpecifications: []ec2_types.TagSpecification{{ - ResourceType: ec2_types.ResourceTypeDedicatedHost, - Tags: []ec2_types.Tag{ + TagSpecifications: []ec2types.TagSpecification{{ + ResourceType: ec2types.ResourceTypeDedicatedHost, + Tags: []ec2types.Tag{ { Key: aws.String("AquariumDedicatedPoolName"), Value: aws.String(w.name), @@ -571,13 +571,13 @@ func (w *dedicatedPoolWorker) releaseDedicatedHosts(ids []string) ([]string, err var unsuccessful []string if len(resp.Unsuccessful) > 0 { - failed_info := "" + failedInfo := "" for _, item := range resp.Unsuccessful { - failed_info += fmt.Sprintf("- InstanceId: %s\n Error: %s %q\n", aws.ToString(item.ResourceId), aws.ToString(item.Error.Code), aws.ToString(item.Error.Message)) + failedInfo += fmt.Sprintf("- InstanceId: %s\n Error: %s %q\n", aws.ToString(item.ResourceId), aws.ToString(item.Error.Code), aws.ToString(item.Error.Message)) unsuccessful = append(unsuccessful, aws.ToString(item.ResourceId)) } - log.Warnf("AWS: dedicated %q: Not all the hosts were released as requested:\n%v", w.name, failed_info) + log.Warnf("AWS: dedicated %q: Not all the hosts were released as requested:\n%v", w.name, failedInfo) } log.Infof("AWS: dedicated %q: Released hosts: %v", w.name, resp.Successful) diff --git a/lib/drivers/aws/driver.go b/lib/drivers/aws/driver.go index 195f172..7f3428c 100644 --- a/lib/drivers/aws/driver.go +++ b/lib/drivers/aws/driver.go @@ -26,7 +26,7 @@ import ( "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/ec2" - ec2_types "github.com/aws/aws-sdk-go-v2/service/ec2/types" + 
ec2types "github.com/aws/aws-sdk-go-v2/service/ec2/types" "github.com/adobe/aquarium-fish/lib/crypt" "github.com/adobe/aquarium-fish/lib/drivers" @@ -54,14 +54,14 @@ func init() { type Driver struct { cfg Config // Contains the available tasks of the driver - tasks_list []drivers.ResourceDriverTask + tasksList []drivers.ResourceDriverTask // Contains quotas cache to not load them for every sneeze - quotas map[string]int64 - quotas_mutex sync.Mutex - quotas_next_update time.Time + quotas map[string]int64 + quotasMutex sync.Mutex + quotasNextUpdate time.Time - dedicated_pools map[string]*dedicatedPoolWorker + dedicatedPools map[string]*dedicatedPoolWorker } func (d *Driver) Name() string { @@ -81,12 +81,12 @@ func (d *Driver) Prepare(config []byte) error { } // Fill up the available tasks to execute - d.tasks_list = append(d.tasks_list, + d.tasksList = append(d.tasksList, &TaskSnapshot{driver: d}, &TaskImage{driver: d}, ) - d.quotas_mutex.Lock() + d.quotasMutex.Lock() { // Preparing a map of useful quotas for easy access and update it d.quotas = make(map[string]int64) @@ -101,12 +101,12 @@ func (d *Driver) Prepare(config []byte) error { d.quotas["Running On-Demand Trn instances"] = 0 d.quotas["Running On-Demand X instances"] = 0 } - d.quotas_mutex.Unlock() + d.quotasMutex.Unlock() // Run the background dedicated hosts pool management - d.dedicated_pools = make(map[string]*dedicatedPoolWorker) + d.dedicatedPools = make(map[string]*dedicatedPoolWorker) for name, params := range d.cfg.DedicatedPool { - d.dedicated_pools[name] = d.newDedicatedPoolWorker(name, params) + d.dedicatedPools[name] = d.newDedicatedPoolWorker(name, params) } return nil @@ -127,8 +127,8 @@ func (d *Driver) ValidateDefinition(def types.LabelDefinition) error { } // Allow Fish to ask the driver about it's capacity (free slots) of a specific definition -func (d *Driver) AvailableCapacity(node_usage types.Resources, def types.LabelDefinition) int64 { - var inst_count int64 +func (d *Driver) 
AvailableCapacity(nodeUsage types.Resources, def types.LabelDefinition) int64 { + var instCount int64 var opts Options if err := opts.Apply(def.Options); err != nil { @@ -136,12 +136,12 @@ func (d *Driver) AvailableCapacity(node_usage types.Resources, def types.LabelDe return -1 } - conn_ec2 := d.newEC2Conn() + connEc2 := d.newEC2Conn() // Dedicated hosts if opts.Pool != "" { // The pool is specified - let's check if it has the capacity - if p, ok := d.dedicated_pools[opts.Pool]; ok { + if p, ok := d.dedicatedPools[opts.Pool]; ok { return p.AvailableCapacity(opts.InstanceType) } log.Warn("AWS: Unable to locate dedicated pool:", opts.Pool) @@ -149,8 +149,8 @@ func (d *Driver) AvailableCapacity(node_usage types.Resources, def types.LabelDe } else if awsInstTypeAny(opts.InstanceType, "mac") { // Ensure we have the available auto-placing dedicated hosts to use as base for resource. // Quotas for hosts are: "Running Dedicated mac1 Hosts" & "Running Dedicated mac2 Hosts" - p := ec2.NewDescribeHostsPaginator(conn_ec2, &ec2.DescribeHostsInput{ - Filter: []ec2_types.Filter{ + p := ec2.NewDescribeHostsPaginator(connEc2, &ec2.DescribeHostsInput{ + Filter: []ec2types.Filter{ { Name: aws.String("instance-type"), Values: []string{opts.InstanceType}, @@ -169,85 +169,85 @@ func (d *Driver) AvailableCapacity(node_usage types.Resources, def types.LabelDe log.Error("AWS: Error during requesting hosts:", err) return -1 } - inst_count += int64(len(resp.Hosts)) + instCount += int64(len(resp.Hosts)) } - log.Debug("AWS: AvailableCapacity for dedicated Mac:", opts.InstanceType, inst_count) + log.Debug("AWS: AvailableCapacity for dedicated Mac:", opts.InstanceType, instCount) - return inst_count + return instCount } // On-Demand hosts d.updateQuotas(false) - d.quotas_mutex.Lock() + d.quotasMutex.Lock() { // All the "Running On-Demand" quotas are per vCPU (for ex. 
64 means 4 instances) - var cpu_quota int64 - inst_types := []string{} + var cpuQuota int64 + instTypes := []string{} // Check we have enough quotas for specified instance type if awsInstTypeAny(opts.InstanceType, "dl") { - cpu_quota = d.quotas["Running On-Demand DL instances"] - inst_types = append(inst_types, "dl") + cpuQuota = d.quotas["Running On-Demand DL instances"] + instTypes = append(instTypes, "dl") } else if awsInstTypeAny(opts.InstanceType, "u-") { - cpu_quota = d.quotas["Running On-Demand High Memory instances"] - inst_types = append(inst_types, "u-") + cpuQuota = d.quotas["Running On-Demand High Memory instances"] + instTypes = append(instTypes, "u-") } else if awsInstTypeAny(opts.InstanceType, "hpc") { - cpu_quota = d.quotas["Running On-Demand HPC instances"] - inst_types = append(inst_types, "hpc") + cpuQuota = d.quotas["Running On-Demand HPC instances"] + instTypes = append(instTypes, "hpc") } else if awsInstTypeAny(opts.InstanceType, "inf") { - cpu_quota = d.quotas["Running On-Demand Inf instances"] - inst_types = append(inst_types, "inf") + cpuQuota = d.quotas["Running On-Demand Inf instances"] + instTypes = append(instTypes, "inf") } else if awsInstTypeAny(opts.InstanceType, "trn") { - cpu_quota = d.quotas["Running On-Demand Trn instances"] - inst_types = append(inst_types, "trn") + cpuQuota = d.quotas["Running On-Demand Trn instances"] + instTypes = append(instTypes, "trn") } else if awsInstTypeAny(opts.InstanceType, "f") { - cpu_quota = d.quotas["Running On-Demand F instances"] - inst_types = append(inst_types, "f") + cpuQuota = d.quotas["Running On-Demand F instances"] + instTypes = append(instTypes, "f") } else if awsInstTypeAny(opts.InstanceType, "g", "vt") { - cpu_quota = d.quotas["Running On-Demand G and VT instances"] - inst_types = append(inst_types, "g", "vt") + cpuQuota = d.quotas["Running On-Demand G and VT instances"] + instTypes = append(instTypes, "g", "vt") } else if awsInstTypeAny(opts.InstanceType, "p") { - cpu_quota = 
d.quotas["Running On-Demand P instances"] - inst_types = append(inst_types, "p") + cpuQuota = d.quotas["Running On-Demand P instances"] + instTypes = append(instTypes, "p") } else if awsInstTypeAny(opts.InstanceType, "x") { - cpu_quota = d.quotas["Running On-Demand X instances"] - inst_types = append(inst_types, "x") + cpuQuota = d.quotas["Running On-Demand X instances"] + instTypes = append(instTypes, "x") } else if awsInstTypeAny(opts.InstanceType, "a", "c", "d", "h", "i", "m", "r", "t", "z") { - cpu_quota = d.quotas["Running On-Demand Standard (A, C, D, H, I, M, R, T, Z) instances"] - inst_types = append(inst_types, "a", "c", "d", "h", "i", "m", "r", "t", "z") + cpuQuota = d.quotas["Running On-Demand Standard (A, C, D, H, I, M, R, T, Z) instances"] + instTypes = append(instTypes, "a", "c", "d", "h", "i", "m", "r", "t", "z") } else { log.Error("AWS: Driver does not support instance type:", opts.InstanceType) return -1 } // Checking the current usage of CPU's of this project and subtracting it from quota value - cpu_usage, err := d.getProjectCpuUsage(conn_ec2, inst_types) + cpuUsage, err := d.getProjectCpuUsage(connEc2, instTypes) if err != nil { return -1 } // To get the available instances we need to divide free cpu's by requested Definition CPU amount - inst_count = (cpu_quota - cpu_usage) / int64(def.Resources.Cpu) + instCount = (cpuQuota - cpuUsage) / int64(def.Resources.Cpu) } - d.quotas_mutex.Unlock() + d.quotasMutex.Unlock() // Make sure we have enough IP's in the selected VPC or subnet - var ip_count int64 + var ipCount int64 var err error - if _, ip_count, err = d.getSubnetId(conn_ec2, def.Resources.Network); err != nil { + if _, ipCount, err = d.getSubnetId(connEc2, def.Resources.Network); err != nil { log.Error("AWS: Error during requesting subnet:", err) return -1 } - log.Debugf("AWS: AvailableCapacity: Quotas: %d, IP's: %d", inst_count, ip_count) + log.Debugf("AWS: AvailableCapacity: Quotas: %d, IP's: %d", instCount, ipCount) // Return the most 
limiting value - if ip_count < inst_count { - return ip_count + if ipCount < instCount { + return ipCount } - return inst_count + return instCount } /** @@ -259,40 +259,40 @@ func (d *Driver) AvailableCapacity(node_usage types.Resources, def types.LabelDe func (d *Driver) Allocate(def types.LabelDefinition, metadata map[string]any) (*types.Resource, error) { // Generate fish name buf := crypt.RandBytes(6) - i_name := fmt.Sprintf("fish-%02x%02x%02x%02x%02x%02x", buf[0], buf[1], buf[2], buf[3], buf[4], buf[5]) + iName := fmt.Sprintf("fish-%02x%02x%02x%02x%02x%02x", buf[0], buf[1], buf[2], buf[3], buf[4], buf[5]) var opts Options if err := opts.Apply(def.Options); err != nil { - return nil, fmt.Errorf("AWS: %s: Unable to apply options: %v", i_name, err) + return nil, fmt.Errorf("AWS: %s: Unable to apply options: %v", iName, err) } conn := d.newEC2Conn() // Checking the VPC exists or use default one - vm_network := def.Resources.Network + vmNetwork := def.Resources.Network var err error - if vm_network, _, err = d.getSubnetId(conn, vm_network); err != nil { - return nil, fmt.Errorf("AWS: %s: Unable to get subnet: %v", i_name, err) + if vmNetwork, _, err = d.getSubnetId(conn, vmNetwork); err != nil { + return nil, fmt.Errorf("AWS: %s: Unable to get subnet: %v", iName, err) } - log.Infof("AWS: %s: Selected subnet: %q", i_name, vm_network) + log.Infof("AWS: %s: Selected subnet: %q", iName, vmNetwork) - vm_image := opts.Image - if vm_image, err = d.getImageId(conn, vm_image); err != nil { - return nil, fmt.Errorf("AWS: %s: Unable to get image: %v", i_name, err) + vmImage := opts.Image + if vmImage, err = d.getImageId(conn, vmImage); err != nil { + return nil, fmt.Errorf("AWS: %s: Unable to get image: %v", iName, err) } - log.Infof("AWS: %s: Selected image: %q", i_name, vm_image) + log.Infof("AWS: %s: Selected image: %q", iName, vmImage) // Prepare Instance request information input := ec2.RunInstancesInput{ - ImageId: aws.String(vm_image), - InstanceType: 
ec2_types.InstanceType(opts.InstanceType), + ImageId: aws.String(vmImage), + InstanceType: ec2types.InstanceType(opts.InstanceType), - NetworkInterfaces: []ec2_types.InstanceNetworkInterfaceSpecification{ + NetworkInterfaces: []ec2types.InstanceNetworkInterfaceSpecification{ { AssociatePublicIpAddress: aws.Bool(false), DeleteOnTermination: aws.Bool(true), DeviceIndex: aws.Int32(0), - SubnetId: aws.String(vm_network), + SubnetId: aws.String(vmNetwork), }, }, @@ -302,23 +302,23 @@ func (d *Driver) Allocate(def types.LabelDefinition, metadata map[string]any) (* if opts.Pool != "" { // Let's reserve or allocate the host for the new instance - if p, ok := d.dedicated_pools[opts.Pool]; ok { - host_id := p.ReserveAllocateHost(opts.InstanceType) - if host_id == "" { - return nil, fmt.Errorf("AWS: %s: Unable to reserve host in dedicated pool %q", i_name, opts.Pool) + if p, ok := d.dedicatedPools[opts.Pool]; ok { + hostId := p.ReserveAllocateHost(opts.InstanceType) + if hostId == "" { + return nil, fmt.Errorf("AWS: %s: Unable to reserve host in dedicated pool %q", iName, opts.Pool) } - input.Placement = &ec2_types.Placement{ - Tenancy: ec2_types.TenancyHost, - HostId: aws.String(host_id), + input.Placement = &ec2types.Placement{ + Tenancy: ec2types.TenancyHost, + HostId: aws.String(hostId), } - log.Infof("AWS: %s: Utilizing pool %q host: %s", i_name, opts.Pool, host_id) + log.Infof("AWS: %s: Utilizing pool %q host: %s", iName, opts.Pool, hostId) } else { - return nil, fmt.Errorf("AWS: %s: Unable to locate the dedicated pool: %s", i_name, opts.Pool) + return nil, fmt.Errorf("AWS: %s: Unable to locate the dedicated pool: %s", iName, opts.Pool) } } else if awsInstTypeAny(opts.InstanceType, "mac") { // For mac machines only dedicated hosts are working, so set the tenancy - input.Placement = &ec2_types.Placement{ - Tenancy: ec2_types.TenancyHost, + input.Placement = &ec2types.Placement{ + Tenancy: ec2types.TenancyHost, } } @@ -326,47 +326,47 @@ func (d *Driver) Allocate(def 
types.LabelDefinition, metadata map[string]any) (* // Set UserData field userdata, err := util.SerializeMetadata(opts.UserDataFormat, opts.UserDataPrefix, metadata) if err != nil { - return nil, fmt.Errorf("AWS: %s: Unable to serialize metadata to userdata: %v", i_name, err) + return nil, fmt.Errorf("AWS: %s: Unable to serialize metadata to userdata: %v", iName, err) } input.UserData = aws.String(base64.StdEncoding.EncodeToString(userdata)) } if opts.SecurityGroup != "" { - vm_secgroup := opts.SecurityGroup - if vm_secgroup, err = d.getSecGroupId(conn, vm_secgroup); err != nil { - return nil, fmt.Errorf("AWS: %s: Unable to get security group: %v", i_name, err) + vmSecgroup := opts.SecurityGroup + if vmSecgroup, err = d.getSecGroupId(conn, vmSecgroup); err != nil { + return nil, fmt.Errorf("AWS: %s: Unable to get security group: %v", iName, err) } - log.Infof("AWS: %s: Selected security group: %q", i_name, vm_secgroup) - input.NetworkInterfaces[0].Groups = []string{vm_secgroup} + log.Infof("AWS: %s: Selected security group: %q", iName, vmSecgroup) + input.NetworkInterfaces[0].Groups = []string{vmSecgroup} } if len(d.cfg.InstanceTags) > 0 || len(opts.Tags) > 0 { - tags_in := map[string]string{} + tagsIn := map[string]string{} // Append tags to the map - from opts (low priority) and from cfg (high priority) for k, v := range opts.Tags { - tags_in[k] = v + tagsIn[k] = v } for k, v := range d.cfg.InstanceTags { - tags_in[k] = v + tagsIn[k] = v } - tags_out := []ec2_types.Tag{} - for k, v := range tags_in { - tags_out = append(tags_out, ec2_types.Tag{ + tagsOut := []ec2types.Tag{} + for k, v := range tagsIn { + tagsOut = append(tagsOut, ec2types.Tag{ Key: aws.String(k), Value: aws.String(v), }) } // Apply name for the instance - tags_out = append(tags_out, ec2_types.Tag{ + tagsOut = append(tagsOut, ec2types.Tag{ Key: aws.String("Name"), - Value: aws.String(i_name), + Value: aws.String(iName), }) - input.TagSpecifications = []ec2_types.TagSpecification{ + 
input.TagSpecifications = []ec2types.TagSpecification{ { - ResourceType: ec2_types.ResourceTypeInstance, - Tags: tags_out, + ResourceType: ec2types.ResourceTypeInstance, + Tags: tagsOut, }, } } @@ -374,52 +374,52 @@ func (d *Driver) Allocate(def types.LabelDefinition, metadata map[string]any) (* // Prepare the device mapping if len(def.Resources.Disks) > 0 { for name, disk := range def.Resources.Disks { - mapping := ec2_types.BlockDeviceMapping{ + mapping := ec2types.BlockDeviceMapping{ DeviceName: aws.String(name), - Ebs: &ec2_types.EbsBlockDevice{ + Ebs: &ec2types.EbsBlockDevice{ DeleteOnTermination: aws.Bool(true), - VolumeType: ec2_types.VolumeTypeGp3, + VolumeType: ec2types.VolumeTypeGp3, }, } if disk.Type != "" { - type_data := strings.Split(disk.Type, ":") - if len(type_data) > 0 && type_data[0] != "" { - mapping.Ebs.VolumeType = ec2_types.VolumeType(type_data[0]) + typeData := strings.Split(disk.Type, ":") + if len(typeData) > 0 && typeData[0] != "" { + mapping.Ebs.VolumeType = ec2types.VolumeType(typeData[0]) } - if len(type_data) > 1 && type_data[1] != "" { - val, err := strconv.ParseInt(type_data[1], 10, 32) + if len(typeData) > 1 && typeData[1] != "" { + val, err := strconv.ParseInt(typeData[1], 10, 32) if err != nil { - return nil, fmt.Errorf("AWS: %s: Unable to parse EBS IOPS int32 from '%s': %v", i_name, type_data[1], err) + return nil, fmt.Errorf("AWS: %s: Unable to parse EBS IOPS int32 from '%s': %v", iName, typeData[1], err) } mapping.Ebs.Iops = aws.Int32(int32(val)) } - if len(type_data) > 2 && type_data[2] != "" { - val, err := strconv.ParseInt(type_data[2], 10, 32) + if len(typeData) > 2 && typeData[2] != "" { + val, err := strconv.ParseInt(typeData[2], 10, 32) if err != nil { - return nil, fmt.Errorf("AWS: %s: Unable to parse EBS Throughput int32 from '%s': %v", i_name, type_data[1], err) + return nil, fmt.Errorf("AWS: %s: Unable to parse EBS Throughput int32 from '%s': %v", iName, typeData[1], err) } mapping.Ebs.Throughput = 
aws.Int32(int32(val)) } } if disk.Clone != "" { // Use snapshot as the disk source - vm_snapshot := disk.Clone - if vm_snapshot, err = d.getSnapshotId(conn, vm_snapshot); err != nil { - return nil, fmt.Errorf("AWS: %s: Unable to get snapshot: %v", i_name, err) + vmSnapshot := disk.Clone + if vmSnapshot, err = d.getSnapshotId(conn, vmSnapshot); err != nil { + return nil, fmt.Errorf("AWS: %s: Unable to get snapshot: %v", iName, err) } - log.Infof("AWS: %s: Selected snapshot: %q", i_name, vm_snapshot) - mapping.Ebs.SnapshotId = aws.String(vm_snapshot) + log.Infof("AWS: %s: Selected snapshot: %q", iName, vmSnapshot) + mapping.Ebs.SnapshotId = aws.String(vmSnapshot) } else { // Just create a new disk mapping.Ebs.VolumeSize = aws.Int32(int32(disk.Size)) if opts.EncryptKey != "" { mapping.Ebs.Encrypted = aws.Bool(true) - key_id, err := d.getKeyId(opts.EncryptKey) + keyId, err := d.getKeyId(opts.EncryptKey) if err != nil { - return nil, fmt.Errorf("AWS: %s: Unable to get encrypt key from KMS: %v", i_name, err) + return nil, fmt.Errorf("AWS: %s: Unable to get encrypt key from KMS: %v", iName, err) } - log.Infof("AWS: %s: Selected encryption key: %q for disk: %q", i_name, key_id, name) - mapping.Ebs.KmsKeyId = aws.String(key_id) + log.Infof("AWS: %s: Selected encryption key: %q for disk: %q", iName, keyId, name) + mapping.Ebs.KmsKeyId = aws.String(keyId) } } input.BlockDeviceMappings = append(input.BlockDeviceMappings, mapping) @@ -429,7 +429,7 @@ func (d *Driver) Allocate(def types.LabelDefinition, metadata map[string]any) (* // Run the instance result, err := conn.RunInstances(context.TODO(), &input) if err != nil { - return nil, log.Errorf("AWS: %s: Unable to run instance: %v", i_name, err) + return nil, log.Errorf("AWS: %s: Unable to run instance: %v", iName, err) } inst := &result.Instances[0] @@ -449,12 +449,12 @@ func (d *Driver) Allocate(def types.LabelDefinition, metadata map[string]any) (* } time.Sleep(5 * time.Second) - inst_tmp, err := d.getInstance(conn, 
aws.ToString(inst.InstanceId)) - if err == nil && inst_tmp != nil { - inst = inst_tmp + instTmp, err := d.getInstance(conn, aws.ToString(inst.InstanceId)) + if err == nil && instTmp != nil { + inst = instTmp } if err != nil { - log.Errorf("AWS: %s: Error during getting instance while waiting for BlockDeviceMappings: %v", i_name, err) + log.Errorf("AWS: %s: Error during getting instance while waiting for BlockDeviceMappings: %v", iName, err) } } for _, bd := range inst.BlockDeviceMappings { @@ -463,25 +463,25 @@ func (d *Driver) Allocate(def types.LabelDefinition, metadata map[string]any) (* continue } - tags_input := ec2.CreateTagsInput{ + tagsInput := ec2.CreateTagsInput{ Resources: []string{aws.ToString(bd.Ebs.VolumeId)}, - Tags: []ec2_types.Tag{}, + Tags: []ec2types.Tag{}, } - tag_vals := strings.Split(disk.Label, ",") - for _, tag_val := range tag_vals { - key_val := strings.SplitN(tag_val, ":", 2) - if len(key_val) < 2 { - key_val = append(key_val, "") + tagVals := strings.Split(disk.Label, ",") + for _, tagVal := range tagVals { + keyVal := strings.SplitN(tagVal, ":", 2) + if len(keyVal) < 2 { + keyVal = append(keyVal, "") } - tags_input.Tags = append(tags_input.Tags, ec2_types.Tag{ - Key: aws.String(key_val[0]), - Value: aws.String(key_val[1]), + tagsInput.Tags = append(tagsInput.Tags, ec2types.Tag{ + Key: aws.String(keyVal[0]), + Value: aws.String(keyVal[1]), }) } - if _, err := conn.CreateTags(context.TODO(), &tags_input); err != nil { + if _, err := conn.CreateTags(context.TODO(), &tagsInput); err != nil { // Do not fail hard here - the instance is already running - log.Warnf("AWS: %s: Unable to set tags for volume: %q, %q, %q", i_name, aws.ToString(bd.Ebs.VolumeId), aws.ToString(bd.DeviceName), err) + log.Warnf("AWS: %s: Unable to set tags for volume: %q, %q, %q", iName, aws.ToString(bd.Ebs.VolumeId), aws.ToString(bd.DeviceName), err) } } } @@ -492,7 +492,7 @@ func (d *Driver) Allocate(def types.LabelDefinition, metadata map[string]any) (* timeout := 60 
for { if inst.PrivateIpAddress != nil { - log.Infof("AWS: %s: Allocate of instance completed: %q, %q", i_name, aws.ToString(inst.InstanceId), aws.ToString(inst.PrivateIpAddress)) + log.Infof("AWS: %s: Allocate of instance completed: %q, %q", iName, aws.ToString(inst.InstanceId), aws.ToString(inst.PrivateIpAddress)) res.Identifier = aws.ToString(inst.InstanceId) res.IpAddr = aws.ToString(inst.PrivateIpAddress) return res, nil @@ -504,17 +504,17 @@ func (d *Driver) Allocate(def types.LabelDefinition, metadata map[string]any) (* } time.Sleep(5 * time.Second) - inst_tmp, err := d.getInstance(conn, aws.ToString(inst.InstanceId)) - if err == nil && inst_tmp != nil { - inst = inst_tmp + instTmp, err := d.getInstance(conn, aws.ToString(inst.InstanceId)) + if err == nil && instTmp != nil { + inst = instTmp } if err != nil { - log.Errorf("AWS: %s: Error during getting instance while waiting for IP: %v, %q", i_name, err, aws.ToString(inst.InstanceId)) + log.Errorf("AWS: %s: Error during getting instance while waiting for IP: %v, %q", iName, err, aws.ToString(inst.InstanceId)) } } res.Identifier = aws.ToString(inst.InstanceId) - return res, log.Errorf("AWS: %s: Unable to locate the instance IP: %q", i_name, aws.ToString(inst.InstanceId)) + return res, log.Errorf("AWS: %s: Unable to locate the instance IP: %q", iName, aws.ToString(inst.InstanceId)) } func (d *Driver) Status(res *types.Resource) (string, error) { @@ -526,7 +526,7 @@ func (d *Driver) Status(res *types.Resource) (string, error) { if err != nil { return "", fmt.Errorf("AWS: Error during status check for %s: %v", res.Identifier, err) } - if inst != nil && inst.State.Name != ec2_types.InstanceStateNameTerminated { + if inst != nil && inst.State.Name != ec2types.InstanceStateNameTerminated { return drivers.StatusAllocated, nil } return drivers.StatusNone, nil @@ -535,7 +535,7 @@ func (d *Driver) Status(res *types.Resource) (string, error) { func (d *Driver) GetTask(name, options string) drivers.ResourceDriverTask { // 
Look for the specified task name var t drivers.ResourceDriverTask - for _, task := range d.tasks_list { + for _, task := range d.tasksList { if task.Name() == name { t = task.Clone() } diff --git a/lib/drivers/aws/task_image.go b/lib/drivers/aws/task_image.go index 13e0e03..cbedf52 100644 --- a/lib/drivers/aws/task_image.go +++ b/lib/drivers/aws/task_image.go @@ -19,7 +19,7 @@ import ( "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/ec2" - ec2_types "github.com/aws/aws-sdk-go-v2/service/ec2/types" + ec2types "github.com/aws/aws-sdk-go-v2/service/ec2/types" "github.com/adobe/aquarium-fish/lib/drivers" "github.com/adobe/aquarium-fish/lib/log" @@ -90,55 +90,55 @@ func (t *TaskImage) Execute() (result []byte, err error) { } log.Debugf("AWS: TaskImage %s: Detecting block devices of the instance...", t.ApplicationTask.UID) - var block_devices []ec2_types.BlockDeviceMapping + var blockDevices []ec2types.BlockDeviceMapping // In case we need just the root disk (!Full) - let's get some additional data // We don't need to fill the block devices if we want a full image of the instance if !t.Full { // TODO: Probably better to use DescribeInstances // Look for the root device name of the instance - describe_input := ec2.DescribeInstanceAttributeInput{ + describeInput := ec2.DescribeInstanceAttributeInput{ InstanceId: aws.String(t.Resource.Identifier), - Attribute: ec2_types.InstanceAttributeNameRootDeviceName, + Attribute: ec2types.InstanceAttributeNameRootDeviceName, } - describe_resp, err := conn.DescribeInstanceAttribute(context.TODO(), &describe_input) + describeResp, err := conn.DescribeInstanceAttribute(context.TODO(), &describeInput) if err != nil { return []byte(`{"error":"internal: failed to request instance root device"}`), log.Errorf("AWS: Unable to request the instance RootDeviceName attribute %s: %v", t.Resource.Identifier, err) } - root_device := aws.ToString(describe_resp.RootDeviceName.Value) + rootDevice := 
aws.ToString(describeResp.RootDeviceName.Value) // Looking for the instance block device mappings to clarify what we need to include in the image - describe_input = ec2.DescribeInstanceAttributeInput{ + describeInput = ec2.DescribeInstanceAttributeInput{ InstanceId: aws.String(t.Resource.Identifier), - Attribute: ec2_types.InstanceAttributeNameBlockDeviceMapping, + Attribute: ec2types.InstanceAttributeNameBlockDeviceMapping, } - describe_resp, err = conn.DescribeInstanceAttribute(context.TODO(), &describe_input) + describeResp, err = conn.DescribeInstanceAttribute(context.TODO(), &describeInput) if err != nil { return []byte(`{"error":"internal: failed to request instance block device mapping"}`), log.Errorf("AWS: Unable to request the instance BlockDeviceMapping attribute %s: %v", t.Resource.Identifier, err) } // Filter the block devices in the image if we don't need full one - for _, dev := range describe_resp.BlockDeviceMappings { + for _, dev := range describeResp.BlockDeviceMappings { // Requesting volume to get necessary data for required Ebs field - mapping := ec2_types.BlockDeviceMapping{ + mapping := ec2types.BlockDeviceMapping{ DeviceName: dev.DeviceName, } - if root_device != aws.ToString(dev.DeviceName) { + if rootDevice != aws.ToString(dev.DeviceName) { mapping.NoDevice = aws.String("") } else { - log.Debugf("AWS: TaskImage %s: Only root disk will be used to create image: %s", t.ApplicationTask.UID, root_device) + log.Debugf("AWS: TaskImage %s: Only root disk will be used to create image: %s", t.ApplicationTask.UID, rootDevice) if dev.Ebs == nil { return []byte(`{"error":"internal: root disk of instance doesn't have EBS config"}`), log.Errorf("AWS: Root disk doesn't have EBS configuration") } params := ec2.DescribeVolumesInput{ VolumeIds: []string{aws.ToString(dev.Ebs.VolumeId)}, } - vol_resp, err := conn.DescribeVolumes(context.TODO(), ¶ms) - if err != nil || len(vol_resp.Volumes) < 1 { + volResp, err := conn.DescribeVolumes(context.TODO(), ¶ms) + if 
err != nil || len(volResp.Volumes) < 1 { return []byte(`{"error":"internal: failed to request instance volume info config"}`), log.Errorf("AWS: Unable to request the instance root volume info %s: %v", aws.ToString(dev.Ebs.VolumeId), err) } - vol_info := vol_resp.Volumes[0] - mapping.Ebs = &ec2_types.EbsBlockDevice{ + volInfo := volResp.Volumes[0] + mapping.Ebs = &ec2types.EbsBlockDevice{ DeleteOnTermination: dev.Ebs.DeleteOnTermination, //Encrypted: vol_info.Encrypted, //Iops: vol_info.Iops, @@ -146,30 +146,30 @@ func (t *TaskImage) Execute() (result []byte, err error) { //OutpostArn: vol_info.OutpostArn, //SnapshotId: vol_info.SnapshotId, //Throughput: vol_info.Throughput, - VolumeSize: vol_info.Size, - VolumeType: vol_info.VolumeType, + VolumeSize: volInfo.Size, + VolumeType: volInfo.VolumeType, } } - block_devices = append(block_devices, mapping) + blockDevices = append(blockDevices, mapping) } } else { log.Debugf("AWS: TaskImage %s: All the instance disks will be used for image", t.ApplicationTask.UID) } // Preparing the create image request - image_name := opts.Image + time.Now().UTC().Format("-060102.150405") + imageName := opts.Image + time.Now().UTC().Format("-060102.150405") if opts.TaskImageName != "" { - image_name = opts.TaskImageName + time.Now().UTC().Format("-060102.150405") + imageName = opts.TaskImageName + time.Now().UTC().Format("-060102.150405") } input := ec2.CreateImageInput{ InstanceId: aws.String(t.Resource.Identifier), - Name: aws.String(image_name), - BlockDeviceMappings: block_devices, + Name: aws.String(imageName), + BlockDeviceMappings: blockDevices, Description: aws.String("Created by AquariumFish"), NoReboot: aws.Bool(true), // Action wants to do that on running instance or already stopped one - TagSpecifications: []ec2_types.TagSpecification{{ - ResourceType: ec2_types.ResourceTypeImage, - Tags: []ec2_types.Tag{ + TagSpecifications: []ec2types.TagSpecification{{ + ResourceType: ec2types.ResourceTypeImage, + Tags: []ec2types.Tag{ { 
Key: aws.String("InstanceId"), Value: aws.String(t.Resource.Identifier), @@ -187,20 +187,20 @@ func (t *TaskImage) Execute() (result []byte, err error) { } if opts.TaskImageEncryptKey != "" { // Append tmp to the name since it's just a temporary image for further re-encryption - input.Name = aws.String("tmp_" + image_name) + input.Name = aws.String("tmp_" + imageName) } if t.ApplicationTask.When == types.ApplicationStatusDEALLOCATE { // Wait for instance stopped before going forward with image creation log.Infof("AWS: TaskImage %s: Wait for instance %q stopping...", t.ApplicationTask.UID, t.Resource.Identifier) sw := ec2.NewInstanceStoppedWaiter(conn) - max_wait := 10 * time.Minute - wait_input := ec2.DescribeInstancesInput{ + maxWait := 10 * time.Minute + waitInput := ec2.DescribeInstancesInput{ InstanceIds: []string{ t.Resource.Identifier, }, } - if err := sw.Wait(context.TODO(), &wait_input, max_wait); err != nil { + if err := sw.Wait(context.TODO(), &waitInput, maxWait); err != nil { // Do not fail hard here - it's still possible to create image of the instance log.Errorf("AWS: TaskImage %s: Error during wait for instance %s stop: %v", t.ApplicationTask.UID, t.Resource.Identifier, err) } @@ -214,33 +214,33 @@ func (t *TaskImage) Execute() (result []byte, err error) { return []byte(`{"error":"internal: no image was created from instance"}`), log.Errorf("AWS: No image was created from instance %s", t.Resource.Identifier) } - image_id := aws.ToString(resp.ImageId) - log.Infof("AWS: TaskImage %s: Created image %q with id %q...", t.ApplicationTask.UID, aws.ToString(input.Name), image_id) + imageId := aws.ToString(resp.ImageId) + log.Infof("AWS: TaskImage %s: Created image %q with id %q...", t.ApplicationTask.UID, aws.ToString(input.Name), imageId) // Wait for the image to be completed, otherwise if we will start a copy - it will fail... 
- log.Infof("AWS: TaskImage %s: Wait for image %s %q availability...", t.ApplicationTask.UID, image_id, aws.ToString(input.Name)) + log.Infof("AWS: TaskImage %s: Wait for image %s %q availability...", t.ApplicationTask.UID, imageId, aws.ToString(input.Name)) sw := ec2.NewImageAvailableWaiter(conn) - max_wait := time.Duration(t.driver.cfg.ImageCreateWait) - wait_input := ec2.DescribeImagesInput{ + maxWait := time.Duration(t.driver.cfg.ImageCreateWait) + waitInput := ec2.DescribeImagesInput{ ImageIds: []string{ - image_id, + imageId, }, } - if err = sw.Wait(context.TODO(), &wait_input, max_wait); err != nil { + if err = sw.Wait(context.TODO(), &waitInput, maxWait); err != nil { // Need to make sure tmp image will be removed, while target image could stay and complete if opts.TaskImageEncryptKey != "" { - log.Debugf("AWS: TaskImage %s: Cleanup the temp image %q", t.ApplicationTask.UID, image_id) - if err := t.driver.deleteImage(conn, image_id); err != nil { + log.Debugf("AWS: TaskImage %s: Cleanup the temp image %q", t.ApplicationTask.UID, imageId) + if err := t.driver.deleteImage(conn, imageId); err != nil { log.Errorf("AWS: TaskImage %s: Unable to cleanup the temp image %s: %v", t.ApplicationTask.UID, t.Resource.Identifier, err) } } - return []byte(`{"error":"internal: timeout on await for the image availability"}`), log.Error("AWS: Error during wait for the image availability:", image_id, aws.ToString(input.Name), err) + return []byte(`{"error":"internal: timeout on await for the image availability"}`), log.Error("AWS: Error during wait for the image availability:", imageId, aws.ToString(input.Name), err) } // If TaskImageEncryptKey is set - we need to copy the image with enabled encryption and delete the temp one if opts.TaskImageEncryptKey != "" { - copy_input := ec2.CopyImageInput{ - Name: aws.String(image_name), + copyInput := ec2.CopyImageInput{ + Name: aws.String(imageName), Description: input.Description, SourceImageId: resp.ImageId, SourceRegion: 
aws.String(t.driver.cfg.Region), @@ -248,8 +248,8 @@ func (t *TaskImage) Execute() (result []byte, err error) { Encrypted: aws.Bool(true), KmsKeyId: aws.String(opts.TaskImageEncryptKey), } - log.Infof("AWS: TaskImage %s: Re-encrypting tmp image to final image %q", t.ApplicationTask.UID, aws.ToString(copy_input.Name)) - resp, err := conn.CopyImage(context.TODO(), ©_input) + log.Infof("AWS: TaskImage %s: Re-encrypting tmp image to final image %q", t.ApplicationTask.UID, aws.ToString(copyInput.Name)) + resp, err := conn.CopyImage(context.TODO(), ©Input) if err != nil { return []byte(`{"error":"internal: failed to copy image"}`), log.Errorf("AWS: Unable to copy image from tmp image %s: %v", aws.ToString(resp.ImageId), err) } @@ -257,29 +257,29 @@ func (t *TaskImage) Execute() (result []byte, err error) { return []byte(`{"error":"internal: no image was copied"}`), log.Errorf("AWS: No image was copied from tmp image %s", aws.ToString(resp.ImageId)) } // Wait for the image to be completed, otherwise if we will delete the temp one right away it will fail... 
- log.Infof("AWS: TaskImage %s: Wait for re-encrypted image %s %q availability...", t.ApplicationTask.UID, aws.ToString(resp.ImageId), image_name) + log.Infof("AWS: TaskImage %s: Wait for re-encrypted image %s %q availability...", t.ApplicationTask.UID, aws.ToString(resp.ImageId), imageName) sw := ec2.NewImageAvailableWaiter(conn) - max_wait := time.Duration(t.driver.cfg.ImageCreateWait) - wait_input := ec2.DescribeImagesInput{ + maxWait := time.Duration(t.driver.cfg.ImageCreateWait) + waitInput := ec2.DescribeImagesInput{ ImageIds: []string{ aws.ToString(resp.ImageId), }, } - if err = sw.Wait(context.TODO(), &wait_input, max_wait); err != nil { + if err = sw.Wait(context.TODO(), &waitInput, maxWait); err != nil { // Do not fail hard here - we still need to remove the tmp image - log.Errorf("AWS: TaskImage %s: Error during wait for re-encrypted image availability: %s %s, %v", t.ApplicationTask.UID, image_name, aws.ToString(resp.ImageId), err) + log.Errorf("AWS: TaskImage %s: Error during wait for re-encrypted image availability: %s %s, %v", t.ApplicationTask.UID, imageName, aws.ToString(resp.ImageId), err) } // Delete the temp image & associated snapshots - log.Debugf("AWS: TaskImage %s: Deleting the temp image %q", t.ApplicationTask.UID, image_id) - if err = t.driver.deleteImage(conn, image_id); err != nil { - return []byte(`{"error":"internal: unable to delete the tmp image"}`), log.Errorf("AWS: Unable to delete the temp image %s: %v", image_id, err) + log.Debugf("AWS: TaskImage %s: Deleting the temp image %q", t.ApplicationTask.UID, imageId) + if err = t.driver.deleteImage(conn, imageId); err != nil { + return []byte(`{"error":"internal: unable to delete the tmp image"}`), log.Errorf("AWS: Unable to delete the temp image %s: %v", imageId, err) } - image_id = aws.ToString(resp.ImageId) + imageId = aws.ToString(resp.ImageId) } - log.Infof("AWS: Created image for the instance %s: %s %q", t.Resource.Identifier, image_id, image_name) + log.Infof("AWS: Created image 
for the instance %s: %s %q", t.Resource.Identifier, imageId, imageName) - return json.Marshal(map[string]string{"image": image_id, "image_name": image_name}) + return json.Marshal(map[string]string{"image": imageId, "image_name": imageName}) } diff --git a/lib/drivers/aws/task_snapshot.go b/lib/drivers/aws/task_snapshot.go index f19b398..c3592fe 100644 --- a/lib/drivers/aws/task_snapshot.go +++ b/lib/drivers/aws/task_snapshot.go @@ -20,7 +20,7 @@ import ( "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/ec2" - ec2_types "github.com/aws/aws-sdk-go-v2/service/ec2/types" + ec2types "github.com/aws/aws-sdk-go-v2/service/ec2/types" "github.com/adobe/aquarium-fish/lib/drivers" "github.com/adobe/aquarium-fish/lib/log" @@ -85,29 +85,29 @@ func (t *TaskSnapshot) Execute() (result []byte, err error) { // Wait for instance stopped before going forward with snapshot sw := ec2.NewInstanceStoppedWaiter(conn) - max_wait := 10 * time.Minute - wait_input := ec2.DescribeInstancesInput{ + maxWait := 10 * time.Minute + waitInput := ec2.DescribeInstancesInput{ InstanceIds: []string{ t.Resource.Identifier, }, } - if err := sw.Wait(context.TODO(), &wait_input, max_wait); err != nil { + if err := sw.Wait(context.TODO(), &waitInput, maxWait); err != nil { // Do not fail hard here - it's still possible to take snapshot of the instance log.Errorf("AWS: TaskSnapshot %s: Error during wait for instance %s stop: %v", t.ApplicationTask.UID, t.Resource.Identifier, err) } } - spec := ec2_types.InstanceSpecification{ + spec := ec2types.InstanceSpecification{ ExcludeBootVolume: aws.Bool(!t.Full), InstanceId: aws.String(t.Resource.Identifier), } input := ec2.CreateSnapshotsInput{ InstanceSpecification: &spec, Description: aws.String("Created by AquariumFish"), - CopyTagsFromSource: ec2_types.CopyTagsFromSourceVolume, - TagSpecifications: []ec2_types.TagSpecification{{ - ResourceType: ec2_types.ResourceTypeSnapshot, - Tags: []ec2_types.Tag{ + CopyTagsFromSource: 
ec2types.CopyTagsFromSourceVolume, + TagSpecifications: []ec2types.TagSpecification{{ + ResourceType: ec2types.ResourceTypeSnapshot, + Tags: []ec2types.Tag{ { Key: aws.String("InstanceId"), Value: aws.String(t.Resource.Identifier), @@ -137,11 +137,11 @@ func (t *TaskSnapshot) Execute() (result []byte, err error) { // Wait for snapshots to be available... log.Infof("AWS: TaskSnapshot %s: Wait for snapshots %s availability...", t.ApplicationTask.UID, snapshots) sw := ec2.NewSnapshotCompletedWaiter(conn) - max_wait := time.Duration(t.driver.cfg.SnapshotCreateWait) - wait_input := ec2.DescribeSnapshotsInput{ + maxWait := time.Duration(t.driver.cfg.SnapshotCreateWait) + waitInput := ec2.DescribeSnapshotsInput{ SnapshotIds: snapshots, } - if err = sw.Wait(context.TODO(), &wait_input, max_wait); err != nil { + if err = sw.Wait(context.TODO(), &waitInput, maxWait); err != nil { // Do not fail hard here - we still need to remove the tmp image log.Errorf("AWS: TaskSnapshot %s: Error during wait snapshots availability: %s, %v", t.ApplicationTask.UID, snapshots, err) } diff --git a/lib/drivers/aws/util.go b/lib/drivers/aws/util.go index e366662..114d947 100644 --- a/lib/drivers/aws/util.go +++ b/lib/drivers/aws/util.go @@ -84,15 +84,15 @@ func (d *Driver) newServiceQuotasConn() *servicequotas.Client { // Will verify and return subnet id // In case vpc id was provided - will chose the subnet with less used ip's // Returns the found subnet_id, total count of available ip's and error if some -func (d *Driver) getSubnetId(conn *ec2.Client, id_tag string) (string, int64, error) { +func (d *Driver) getSubnetId(conn *ec2.Client, idTag string) (string, int64, error) { filter := types.Filter{} // Check if the tag is provided (":") - if strings.Contains(id_tag, ":") { - log.Debug("AWS: Fetching tag vpc or subnet:", id_tag) - tag_key_val := strings.SplitN(id_tag, ":", 2) - filter.Name = aws.String("tag:" + tag_key_val[0]) - filter.Values = []string{tag_key_val[1]} + if 
strings.Contains(idTag, ":") { + log.Debug("AWS: Fetching tag vpc or subnet:", idTag) + tagKeyVal := strings.SplitN(idTag, ":", 2) + filter.Name = aws.String("tag:" + tagKeyVal[0]) + filter.Values = []string{tagKeyVal[1]} // Look for VPC with the defined tag req := ec2.DescribeVpcsInput{ @@ -120,21 +120,21 @@ func (d *Driver) getSubnetId(conn *ec2.Client, id_tag string) (string, int64, er if err != nil || len(resp.Subnets) == 0 { return "", 0, fmt.Errorf("AWS: Unable to locate vpc or subnet with specified tag: %s:%q, %v", aws.ToString(filter.Name), filter.Values, err) } - id_tag = aws.ToString(resp.Subnets[0].SubnetId) - return id_tag, int64(aws.ToInt32(resp.Subnets[0].AvailableIpAddressCount)), nil + idTag = aws.ToString(resp.Subnets[0].SubnetId) + return idTag, int64(aws.ToInt32(resp.Subnets[0].AvailableIpAddressCount)), nil } if len(resp.Vpcs) > 1 { - log.Warn("AWS: There is more than one vpc with the same tag:", id_tag) + log.Warn("AWS: There is more than one vpc with the same tag:", idTag) } - id_tag = aws.ToString(resp.Vpcs[0].VpcId) - log.Debug("AWS: Found VPC with id:", id_tag) + idTag = aws.ToString(resp.Vpcs[0].VpcId) + log.Debug("AWS: Found VPC with id:", idTag) } else { // If network id is not a subnet - process as vpc - if !strings.HasPrefix(id_tag, "subnet-") { - if id_tag != "" { + if !strings.HasPrefix(idTag, "subnet-") { + if idTag != "" { // Use VPC to verify it exists in the project filter.Name = aws.String("vpc-id") - filter.Values = []string{id_tag} + filter.Values = []string{idTag} } else { // Locate the default VPC filter.Name = aws.String("is-default") @@ -154,23 +154,23 @@ func (d *Driver) getSubnetId(conn *ec2.Client, id_tag string) (string, int64, er return "", 0, fmt.Errorf("AWS: No VPCs available in the project") } - if id_tag == "" { - id_tag = aws.ToString(resp.Vpcs[0].VpcId) - log.Debug("AWS: Using default VPC:", id_tag) - } else if id_tag != aws.ToString(resp.Vpcs[0].VpcId) { - return "", 0, fmt.Errorf("AWS: Unable to verify the vpc 
id: %q != %q", id_tag, aws.ToString(resp.Vpcs[0].VpcId)) + if idTag == "" { + idTag = aws.ToString(resp.Vpcs[0].VpcId) + log.Debug("AWS: Using default VPC:", idTag) + } else if idTag != aws.ToString(resp.Vpcs[0].VpcId) { + return "", 0, fmt.Errorf("AWS: Unable to verify the vpc id: %q != %q", idTag, aws.ToString(resp.Vpcs[0].VpcId)) } } } - if strings.HasPrefix(id_tag, "vpc-") { + if strings.HasPrefix(idTag, "vpc-") { // Filtering subnets by VPC id filter.Name = aws.String("vpc-id") - filter.Values = []string{id_tag} + filter.Values = []string{idTag} } else { // Check subnet exists in the project filter.Name = aws.String("subnet-id") - filter.Values = []string{id_tag} + filter.Values = []string{idTag} } req := ec2.DescribeSubnetsInput{ Filters: []types.Filter{ @@ -185,42 +185,42 @@ func (d *Driver) getSubnetId(conn *ec2.Client, id_tag string) (string, int64, er return "", 0, fmt.Errorf("AWS: No Subnets available in the project") } - if strings.HasPrefix(id_tag, "vpc-") { + if strings.HasPrefix(idTag, "vpc-") { // Chose the less used subnet in VPC - var curr_count int32 = 0 - var total_ip_count int64 = 0 + var currCount int32 = 0 + var totalIpCount int64 = 0 for _, subnet := range resp.Subnets { - total_ip_count += int64(aws.ToInt32(subnet.AvailableIpAddressCount)) - if curr_count < aws.ToInt32(subnet.AvailableIpAddressCount) { - id_tag = aws.ToString(subnet.SubnetId) - curr_count = aws.ToInt32(subnet.AvailableIpAddressCount) + totalIpCount += int64(aws.ToInt32(subnet.AvailableIpAddressCount)) + if currCount < aws.ToInt32(subnet.AvailableIpAddressCount) { + idTag = aws.ToString(subnet.SubnetId) + currCount = aws.ToInt32(subnet.AvailableIpAddressCount) } } - if curr_count == 0 { + if currCount == 0 { return "", 0, fmt.Errorf("AWS: Subnets have no available IP addresses") } - return id_tag, total_ip_count, nil - } else if id_tag != aws.ToString(resp.Subnets[0].SubnetId) { - return "", 0, fmt.Errorf("AWS: Unable to verify the subnet id: %q != %q", id_tag, 
aws.ToString(resp.Subnets[0].SubnetId)) + return idTag, totalIpCount, nil + } else if idTag != aws.ToString(resp.Subnets[0].SubnetId) { + return "", 0, fmt.Errorf("AWS: Unable to verify the subnet id: %q != %q", idTag, aws.ToString(resp.Subnets[0].SubnetId)) } - return id_tag, int64(aws.ToInt32(resp.Subnets[0].AvailableIpAddressCount)), nil + return idTag, int64(aws.ToInt32(resp.Subnets[0].AvailableIpAddressCount)), nil } // Will verify and return image id -func (d *Driver) getImageId(conn *ec2.Client, id_name string) (string, error) { - if strings.HasPrefix(id_name, "ami-") { - return id_name, nil +func (d *Driver) getImageId(conn *ec2.Client, idName string) (string, error) { + if strings.HasPrefix(idName, "ami-") { + return idName, nil } - log.Debug("AWS: Looking for image name:", id_name) + log.Debug("AWS: Looking for image name:", idName) // Look for image with the defined name req := ec2.DescribeImagesInput{ Filters: []types.Filter{ { Name: aws.String("name"), - Values: []string{id_name}, + Values: []string{idName}, }, { Name: aws.String("state"), @@ -232,20 +232,20 @@ func (d *Driver) getImageId(conn *ec2.Client, id_name string) (string, error) { p := ec2.NewDescribeImagesPaginator(conn, &req) resp, err := conn.DescribeImages(context.TODO(), &req) if err != nil || len(resp.Images) == 0 { - return "", fmt.Errorf("AWS: Unable to locate image with specified name: %s, err: %v", id_name, err) + return "", fmt.Errorf("AWS: Unable to locate image with specified name: %s, err: %v", idName, err) } - id_name = aws.ToString(resp.Images[0].ImageId) + idName = aws.ToString(resp.Images[0].ImageId) // Getting the images and find the latest one - var found_id string - var found_time time.Time + var foundId string + var foundTime time.Time for p.HasMorePages() { resp, err := p.NextPage(context.TODO()) if err != nil { return "", fmt.Errorf("AWS: Error during requesting snapshot: %v", err) } if len(resp.Images) > 100 { - log.Warnf("AWS: Over 100 images was found for the name 
%q, could be slow...", id_name) + log.Warnf("AWS: Over 100 images was found for the name %q, could be slow...", idName) } for _, r := range resp.Images { // Converting from RFC-3339/ISO-8601 format "2024-03-07T15:53:03.000Z" @@ -254,80 +254,80 @@ func (d *Driver) getImageId(conn *ec2.Client, id_name string) (string, error) { log.Warnf("AWS: Error during parsing image create time: %v", err) continue } - if found_time.Before(t) { - found_id = aws.ToString(r.ImageId) - found_time = t + if foundTime.Before(t) { + foundId = aws.ToString(r.ImageId) + foundTime = t } } } - if found_id == "" { - return "", fmt.Errorf("AWS: Unable to locate snapshot with specified tag: %s", id_name) + if foundId == "" { + return "", fmt.Errorf("AWS: Unable to locate snapshot with specified tag: %s", idName) } - return found_id, nil + return foundId, nil } // Types are used to calculate some not that obvious values -func (d *Driver) getTypes(conn *ec2.Client, instance_types []string) (map[string]types.InstanceTypeInfo, error) { +func (d *Driver) getTypes(conn *ec2.Client, instanceTypes []string) (map[string]types.InstanceTypeInfo, error) { out := make(map[string]types.InstanceTypeInfo) req := ec2.DescribeInstanceTypesInput{} - for _, typ := range instance_types { + for _, typ := range instanceTypes { req.InstanceTypes = append(req.InstanceTypes, types.InstanceType(typ)) } resp, err := conn.DescribeInstanceTypes(context.TODO(), &req) if err != nil || len(resp.InstanceTypes) == 0 { - return out, fmt.Errorf("AWS: Unable to locate instance types with specified name %q: %v", instance_types, err) + return out, fmt.Errorf("AWS: Unable to locate instance types with specified name %q: %v", instanceTypes, err) } for i, typ := range resp.InstanceTypes { out[string(typ.InstanceType)] = resp.InstanceTypes[i] } - if len(resp.InstanceTypes) != len(instance_types) { - not_found := []string{} - for _, typ := range instance_types { + if len(resp.InstanceTypes) != len(instanceTypes) { + notFound := []string{} 
+ for _, typ := range instanceTypes { if _, ok := out[typ]; !ok { - not_found = append(not_found, typ) + notFound = append(notFound, typ) } } - return out, fmt.Errorf("AWS: Unable to locate all the requested types %q: %q", instance_types, not_found) + return out, fmt.Errorf("AWS: Unable to locate all the requested types %q: %q", instanceTypes, notFound) } return out, nil } // Will return latest available image for the instance type -func (d *Driver) getImageIdByType(conn *ec2.Client, instance_type string) (string, error) { - log.Debug("AWS: Looking an image for type:", instance_type) +func (d *Driver) getImageIdByType(conn *ec2.Client, instanceType string) (string, error) { + log.Debug("AWS: Looking an image for type:", instanceType) - inst_types, err := d.getTypes(conn, []string{instance_type}) + instTypes, err := d.getTypes(conn, []string{instanceType}) if err != nil { - return "", fmt.Errorf("AWS: Unable to find instance type %q: %v", instance_type, err) + return "", fmt.Errorf("AWS: Unable to find instance type %q: %v", instanceType, err) } - if inst_types[instance_type].ProcessorInfo == nil || len(inst_types[instance_type].ProcessorInfo.SupportedArchitectures) < 1 { - return "", fmt.Errorf("AWS: The instance type doesn't have needed processor arch params %q: %v", instance_type, err) + if instTypes[instanceType].ProcessorInfo == nil || len(instTypes[instanceType].ProcessorInfo.SupportedArchitectures) < 1 { + return "", fmt.Errorf("AWS: The instance type doesn't have needed processor arch params %q: %v", instanceType, err) } - type_arch := inst_types[instance_type].ProcessorInfo.SupportedArchitectures[0] - log.Debug("AWS: Looking an image for type: found arch:", type_arch) + typeArch := instTypes[instanceType].ProcessorInfo.SupportedArchitectures[0] + log.Debug("AWS: Looking an image for type: found arch:", typeArch) // Look for base image from aws with the defined architecture // We checking last year and if it's empty - trying past years until will find the 
image - images_till := time.Now() - for images_till.Year() > time.Now().Year()-10 { // Probably past 10 years will work for everyone, right? - log.Debugf("AWS: Looking an image: Checking past year from %d", images_till.Year()) + imagesTill := time.Now() + for imagesTill.Year() > time.Now().Year()-10 { // Probably past 10 years will work for everyone, right? + log.Debugf("AWS: Looking an image: Checking past year from %d", imagesTill.Year()) req := ec2.DescribeImagesInput{ Filters: []types.Filter{ { Name: aws.String("architecture"), - Values: []string{string(type_arch)}, + Values: []string{string(typeArch)}, }, { Name: aws.String("creation-date"), - Values: awsLastYearFilterValues(images_till), + Values: awsLastYearFilterValues(imagesTill), }, { Name: aws.String("is-public"), @@ -345,41 +345,41 @@ func (d *Driver) getImageIdByType(conn *ec2.Client, instance_type string) (strin } resp, err := conn.DescribeImages(context.TODO(), &req) if err != nil { - log.Errorf("AWS: Error during request to find image with arch %q for year %d: %v", type_arch, images_till.Year(), err) - images_till = images_till.AddDate(-1, 0, 0) + log.Errorf("AWS: Error during request to find image with arch %q for year %d: %v", typeArch, imagesTill.Year(), err) + imagesTill = imagesTill.AddDate(-1, 0, 0) continue } if len(resp.Images) == 0 { // No images this year, let's reiterate with previous year - log.Infof("AWS: Unable to find any images of arch %q till year %d: %v %v", type_arch, images_till.Year(), req, resp) - images_till = images_till.AddDate(-1, 0, 0) + log.Infof("AWS: Unable to find any images of arch %q till year %d: %v %v", typeArch, imagesTill.Year(), req, resp) + imagesTill = imagesTill.AddDate(-1, 0, 0) continue } - image_id := aws.ToString(resp.Images[0].ImageId) + imageId := aws.ToString(resp.Images[0].ImageId) - log.Debugf("AWS: Found image for specified type %q (arch %s): %s", instance_type, type_arch, image_id) + log.Debugf("AWS: Found image for specified type %q (arch %s): 
%s", instanceType, typeArch, imageId) - return image_id, nil + return imageId, nil } - return "", fmt.Errorf("AWS: Unable to locate image for type %q (arch %s) till year %d", instance_type, type_arch, images_till.Year()+1) + return "", fmt.Errorf("AWS: Unable to locate image for type %q (arch %s) till year %d", instanceType, typeArch, imagesTill.Year()+1) } // Will verify and return security group id -func (d *Driver) getSecGroupId(conn *ec2.Client, id_name string) (string, error) { - if strings.HasPrefix(id_name, "sg-") { - return id_name, nil +func (d *Driver) getSecGroupId(conn *ec2.Client, idName string) (string, error) { + if strings.HasPrefix(idName, "sg-") { + return idName, nil } - log.Debug("AWS: Looking for security group name:", id_name) + log.Debug("AWS: Looking for security group name:", idName) // Look for security group with the defined name req := ec2.DescribeSecurityGroupsInput{ Filters: []types.Filter{ { Name: aws.String("group-name"), - Values: []string{id_name}, + Values: []string{idName}, }, { Name: aws.String("owner-id"), @@ -392,31 +392,31 @@ func (d *Driver) getSecGroupId(conn *ec2.Client, id_name string) (string, error) return "", fmt.Errorf("AWS: Unable to locate security group with specified name: %v", err) } if len(resp.SecurityGroups) > 1 { - log.Warn("AWS: There is more than one group with the same name:", id_name) + log.Warn("AWS: There is more than one group with the same name:", idName) } - id_name = aws.ToString(resp.SecurityGroups[0].GroupId) + idName = aws.ToString(resp.SecurityGroups[0].GroupId) - return id_name, nil + return idName, nil } // Will verify and return latest snapshot id -func (d *Driver) getSnapshotId(conn *ec2.Client, id_tag string) (string, error) { - if strings.HasPrefix(id_tag, "snap-") { - return id_tag, nil +func (d *Driver) getSnapshotId(conn *ec2.Client, idTag string) (string, error) { + if strings.HasPrefix(idTag, "snap-") { + return idTag, nil } - if !strings.Contains(id_tag, ":") { - return "", 
fmt.Errorf("AWS: Incorrect snapshot tag format: %s", id_tag) + if !strings.Contains(idTag, ":") { + return "", fmt.Errorf("AWS: Incorrect snapshot tag format: %s", idTag) } - log.Debug("AWS: Fetching snapshot tag:", id_tag) - tag_key_val := strings.SplitN(id_tag, ":", 2) + log.Debug("AWS: Fetching snapshot tag:", idTag) + tagKeyVal := strings.SplitN(idTag, ":", 2) // Look for VPC with the defined tag over pages req := ec2.DescribeSnapshotsInput{ Filters: []types.Filter{ { - Name: aws.String("tag:" + tag_key_val[0]), - Values: []string{tag_key_val[1]}, + Name: aws.String("tag:" + tagKeyVal[0]), + Values: []string{tagKeyVal[1]}, }, { Name: aws.String("status"), @@ -428,33 +428,33 @@ func (d *Driver) getSnapshotId(conn *ec2.Client, id_tag string) (string, error) p := ec2.NewDescribeSnapshotsPaginator(conn, &req) // Getting the snapshots to find the latest one - found_id := "" - var found_time time.Time + foundId := "" + var foundTime time.Time for p.HasMorePages() { resp, err := p.NextPage(context.TODO()) if err != nil { return "", fmt.Errorf("AWS: Error during requesting snapshot: %v", err) } if len(resp.Snapshots) > 900 { - log.Warn("AWS: Over 900 snapshots was found for tag, could be slow:", id_tag) + log.Warn("AWS: Over 900 snapshots was found for tag, could be slow:", idTag) } for _, r := range resp.Snapshots { - if found_time.Before(aws.ToTime(r.StartTime)) { - found_id = aws.ToString(r.SnapshotId) - found_time = aws.ToTime(r.StartTime) + if foundTime.Before(aws.ToTime(r.StartTime)) { + foundId = aws.ToString(r.SnapshotId) + foundTime = aws.ToTime(r.StartTime) } } } - if found_id == "" { - return "", fmt.Errorf("AWS: Unable to locate snapshot with specified tag: %s", id_tag) + if foundId == "" { + return "", fmt.Errorf("AWS: Unable to locate snapshot with specified tag: %s", idTag) } - return found_id, nil + return foundId, nil } -func (d *Driver) getProjectCpuUsage(conn *ec2.Client, inst_types []string) (int64, error) { - var cpu_count int64 +func (d *Driver) 
getProjectCpuUsage(conn *ec2.Client, instTypes []string) (int64, error) { + var cpuCount int64 // Here is no way to use some filter, so we're getting them all and after that // checking if the instance is actually starts with type+number. @@ -477,23 +477,23 @@ func (d *Driver) getProjectCpuUsage(conn *ec2.Client, inst_types []string) (int6 } for _, res := range resp.Reservations { for _, inst := range res.Instances { - if awsInstTypeAny(string(inst.InstanceType), inst_types...) { + if awsInstTypeAny(string(inst.InstanceType), instTypes...) { // Maybe it is a better idea to check the instance type vCPU's... - cpu_count += int64(aws.ToInt32(inst.CpuOptions.CoreCount) * aws.ToInt32(inst.CpuOptions.ThreadsPerCore)) + cpuCount += int64(aws.ToInt32(inst.CpuOptions.CoreCount) * aws.ToInt32(inst.CpuOptions.ThreadsPerCore)) } } } } - return cpu_count, nil + return cpuCount, nil } -func (d *Driver) getInstance(conn *ec2.Client, inst_id string) (*types.Instance, error) { +func (d *Driver) getInstance(conn *ec2.Client, instId string) (*types.Instance, error) { input := ec2.DescribeInstancesInput{ Filters: []types.Filter{ { Name: aws.String("instance-id"), - Values: []string{inst_id}, + Values: []string{instId}, }, }, } @@ -509,12 +509,12 @@ func (d *Driver) getInstance(conn *ec2.Client, inst_id string) (*types.Instance, } // Will get the kms key id based on alias if it's specified -func (d *Driver) getKeyId(id_alias string) (string, error) { - if !strings.HasPrefix(id_alias, "alias/") { - return id_alias, nil +func (d *Driver) getKeyId(idAlias string) (string, error) { + if !strings.HasPrefix(idAlias, "alias/") { + return idAlias, nil } - log.Debug("AWS: Fetching key alias:", id_alias) + log.Debug("AWS: Fetching key alias:", idAlias) conn := d.newKMSConn() @@ -531,36 +531,36 @@ func (d *Driver) getKeyId(id_alias string) (string, error) { return "", fmt.Errorf("AWS: Error during requesting alias list: %v", err) } if len(resp.Aliases) > 90 { - log.Warn("AWS: Over 90 aliases was 
found, could be slow:", id_alias) + log.Warn("AWS: Over 90 aliases was found, could be slow:", idAlias) } for _, r := range resp.Aliases { - if id_alias == aws.ToString(r.AliasName) { + if idAlias == aws.ToString(r.AliasName) { return aws.ToString(r.TargetKeyId), nil } } } - return "", fmt.Errorf("AWS: Unable to locate kms key id with specified alias: %s", id_alias) + return "", fmt.Errorf("AWS: Unable to locate kms key id with specified alias: %s", idAlias) } func (d *Driver) updateQuotas(force bool) error { - d.quotas_mutex.Lock() - defer d.quotas_mutex.Unlock() + d.quotasMutex.Lock() + defer d.quotasMutex.Unlock() - if !force && d.quotas_next_update.After(time.Now()) { + if !force && d.quotasNextUpdate.After(time.Now()) { return nil } log.Debug("AWS: Updating quotas...") // Update the cache - conn_sq := d.newServiceQuotasConn() + connSq := d.newServiceQuotasConn() // Get the list of quotas req := servicequotas.ListServiceQuotasInput{ ServiceCode: aws.String("ec2"), } - p := servicequotas.NewListServiceQuotasPaginator(conn_sq, &req) + p := servicequotas.NewListServiceQuotasPaginator(connSq, &req) // Processing the received quotas for p.HasMorePages() { @@ -577,21 +577,21 @@ func (d *Driver) updateQuotas(force bool) error { log.Debug("AWS: Quotas:", d.quotas) - d.quotas_next_update = time.Now().Add(time.Minute * 30) + d.quotasNextUpdate = time.Now().Add(time.Minute * 30) return nil } // Checks if the value starts with any of the options and followed by digit func awsInstTypeAny(val string, options ...string) bool { - var char_after_opt byte + var charAfterOpt byte for _, opt := range options { // Here we check that strings starts with the prefix in options if strings.HasPrefix(val, opt) { // And followed by a digit from 1 to 9 (otherwise type "h" could be mixed with "hpc") // We're not expecting unicode chars here so byte comparison works just well - char_after_opt = val[len(opt)] - if char_after_opt >= '1' && char_after_opt <= '9' { + charAfterOpt = val[len(opt)] 
+ if charAfterOpt >= '1' && charAfterOpt <= '9' { return true } } @@ -605,24 +605,24 @@ func awsInstTypeAny(val string, options ...string) bool { * Creates and immediately terminates instance to trigger scrubbing process on mac hosts. * Used during mac dedicated hosts pool management to deal with 24h limit to save on budget. */ -func (d *Driver) triggerHostScrubbing(host_id, instance_type string) (err error) { +func (d *Driver) triggerHostScrubbing(hostId, instanceType string) (err error) { conn := d.newEC2Conn() // Just need an image, which we could find by looking at the host instance type - var vm_image string - if vm_image, err = d.getImageIdByType(conn, instance_type); err != nil { - return fmt.Errorf("AWS: scrubbing %s: Unable to find image: %v", host_id, err) + var vmImage string + if vmImage, err = d.getImageIdByType(conn, instanceType); err != nil { + return fmt.Errorf("AWS: scrubbing %s: Unable to find image: %v", hostId, err) } - log.Infof("AWS: scrubbing %s: Selected image: %q", host_id, vm_image) + log.Infof("AWS: scrubbing %s: Selected image: %q", hostId, vmImage) // Prepare Instance request information placement := types.Placement{ Tenancy: types.TenancyHost, - HostId: aws.String(host_id), + HostId: aws.String(hostId), } input := ec2.RunInstancesInput{ - ImageId: aws.String(vm_image), - InstanceType: types.InstanceType(instance_type), + ImageId: aws.String(vmImage), + InstanceType: types.InstanceType(instanceType), // Set placement to the target host Placement: &placement, @@ -634,10 +634,10 @@ func (d *Driver) triggerHostScrubbing(host_id, instance_type string) (err error) // Run the instance result, err := conn.RunInstances(context.TODO(), &input) if err != nil { - return log.Errorf("AWS: scrubbing %s: Unable to run instance: %v", host_id, err) + return log.Errorf("AWS: scrubbing %s: Unable to run instance: %v", hostId, err) } - inst_id := aws.ToString(result.Instances[0].InstanceId) + instId := aws.ToString(result.Instances[0].InstanceId) // Don't 
need to wait - let's terminate the instance right away // We need to terminate no matter wat - so repeating until it will be terminated, otherwise @@ -645,18 +645,18 @@ for { input := ec2.TerminateInstancesInput{ - InstanceIds: []string{inst_id}, + InstanceIds: []string{instId}, } result, err := conn.TerminateInstances(context.TODO(), &input) if err != nil || len(result.TerminatingInstances) < 1 { - log.Errorf("AWS: scrubbing %s: Error during termianting the instance %s: %s", host_id, inst_id, err) + log.Errorf("AWS: scrubbing %s: Error during terminating the instance %s: %s", hostId, instId, err) time.Sleep(10 * time.Second) continue } - if aws.ToString(result.TerminatingInstances[0].InstanceId) != inst_id { - log.Errorf("AWS: scrubbing %s: Wrong instance id result %s during terminating of %s", host_id, aws.ToString(result.TerminatingInstances[0].InstanceId), inst_id) + if aws.ToString(result.TerminatingInstances[0].InstanceId) != instId { + log.Errorf("AWS: scrubbing %s: Wrong instance id result %s during terminating of %s", hostId, aws.ToString(result.TerminatingInstances[0].InstanceId), instId) time.Sleep(10 * time.Second) continue } @@ -664,7 +664,7 @@ break } - log.Infof("AWS: scrubbing %s: Scrubbing process was triggered", host_id) + log.Infof("AWS: scrubbing %s: Scrubbing process was triggered", hostId) return nil } @@ -681,8 +681,8 @@ func (d *Driver) deleteImage(conn *ec2.Client, id string) (err error) { ImageIds: []string{id}, Owners: d.cfg.AccountIDs, } - resp_img, err := conn.DescribeImages(context.TODO(), &req) - if err != nil || len(resp_img.Images) == 0 { + respImg, err := conn.DescribeImages(context.TODO(), &req) + if err != nil || len(respImg.Images) == 0 { return fmt.Errorf("AWS: Unable to describe image with specified id %q: %w", id, err) } @@ -690,21 +690,21 @@ func (d *Driver)
deleteImage(conn *ec2.Client, id string) (err error) { input := ec2.DeregisterImageInput{ImageId: aws.String(id)} _, err = conn.DeregisterImage(context.TODO(), &input) if err != nil { - return fmt.Errorf("AWS: Unable to deregister the image %s %q: %w", id, aws.ToString(resp_img.Images[0].Name), err) + return fmt.Errorf("AWS: Unable to deregister the image %s %q: %w", id, aws.ToString(respImg.Images[0].Name), err) } // Delete the image snapshots - for _, disk := range resp_img.Images[0].BlockDeviceMappings { + for _, disk := range respImg.Images[0].BlockDeviceMappings { if disk.Ebs == nil || disk.Ebs.SnapshotId == nil { continue } log.Debugf("AWS: Deleting the image %s associated snapshot %s", id, aws.ToString(disk.Ebs.SnapshotId)) input := ec2.DeleteSnapshotInput{SnapshotId: disk.Ebs.SnapshotId} - _, err_tmp := conn.DeleteSnapshot(context.TODO(), &input) - if err_tmp != nil { + _, errTmp := conn.DeleteSnapshot(context.TODO(), &input) + if errTmp != nil { // Do not fail hard to try to delete all the snapshots - log.Errorf("AWS: Unable to delete image %s %q snapshot %s: %v", id, aws.ToString(resp_img.Images[0].Name), aws.ToString(disk.Ebs.SnapshotId), err) - err = err_tmp + log.Errorf("AWS: Unable to delete image %s %q snapshot %s: %v", id, aws.ToString(respImg.Images[0].Name), aws.ToString(disk.Ebs.SnapshotId), err) + err = errTmp } } diff --git a/lib/drivers/docker/driver.go b/lib/drivers/docker/driver.go index f8a204c..23b173c 100644 --- a/lib/drivers/docker/driver.go +++ b/lib/drivers/docker/driver.go @@ -49,13 +49,13 @@ func init() { type Driver struct { cfg Config // Contains the available tasks of the driver - tasks_list []drivers.ResourceDriverTask + tasksList []drivers.ResourceDriverTask - total_cpu uint // In logical threads - total_ram uint // In RAM megabytes + totalCpu uint // In logical threads + totalRam uint // In RAM megabytes - docker_usage_mutex sync.Mutex - docker_usage types.Resources // Used when the docker is remote + dockerUsageMutex 
sync.Mutex + dockerUsage types.Resources // Used when the docker is remote } func (d *Driver) Name() string { @@ -81,24 +81,24 @@ func (d *Driver) Prepare(config []byte) error { if err != nil { return fmt.Errorf("Docker: Unable to get system info to find the available resources: %v", err) } - cpu_mem := strings.Split(strings.TrimSpace(stdout), ",") - if len(cpu_mem) < 2 { - return fmt.Errorf("Docker: Not enough info values in return: %q", cpu_mem) + cpuMem := strings.Split(strings.TrimSpace(stdout), ",") + if len(cpuMem) < 2 { + return fmt.Errorf("Docker: Not enough info values in return: %q", cpuMem) } - parsed_cpu, err := strconv.ParseUint(cpu_mem[0], 10, 64) + parsedCpu, err := strconv.ParseUint(cpuMem[0], 10, 64) if err != nil { - return fmt.Errorf("Docker: Unable to parse CPU uint: %v (%q)", err, cpu_mem[0]) + return fmt.Errorf("Docker: Unable to parse CPU uint: %v (%q)", err, cpuMem[0]) } - d.total_cpu = uint(parsed_cpu / 1000000000) // Originally in NCPU - parsed_ram, err := strconv.ParseUint(cpu_mem[1], 10, 64) + d.totalCpu = uint(parsedCpu / 1000000000) // Originally in NCPU + parsedRam, err := strconv.ParseUint(cpuMem[1], 10, 64) if err != nil { - return fmt.Errorf("Docker: Unable to parse RAM uint: %v (%q)", err, cpu_mem[1]) + return fmt.Errorf("Docker: Unable to parse RAM uint: %v (%q)", err, cpuMem[1]) } - d.total_ram = uint(parsed_ram / 1073741824) // Get in GB + d.totalRam = uint(parsedRam / 1073741824) // Get in GB // Collect the current state of docker containers for validation (for example not controlled // containers) purposes - it will be actively used if docker driver is remote - d.docker_usage, err = d.getInitialUsage() + d.dockerUsage, err = d.getInitialUsage() if err != nil { return err } @@ -119,55 +119,55 @@ func (d *Driver) ValidateDefinition(def types.LabelDefinition) error { } // Allow Fish to ask the driver about it's capacity (free slots) of a specific definition -func (d *Driver) AvailableCapacity(node_usage types.Resources, req 
types.LabelDefinition) int64 { - var out_count int64 +func (d *Driver) AvailableCapacity(nodeUsage types.Resources, req types.LabelDefinition) int64 { + var outCount int64 if d.cfg.IsRemote { // It's remote so use the driver-calculated usage - d.docker_usage_mutex.Lock() - node_usage = d.docker_usage - d.docker_usage_mutex.Unlock() + d.dockerUsageMutex.Lock() + nodeUsage = d.dockerUsage + d.dockerUsageMutex.Unlock() } - avail_cpu, avail_ram := d.getAvailResources() + availCpu, availRam := d.getAvailResources() // Check if the node has the required resources - otherwise we can't run it anyhow - if req.Resources.Cpu > avail_cpu { + if req.Resources.Cpu > availCpu { return 0 } - if req.Resources.Ram > avail_ram { + if req.Resources.Ram > availRam { return 0 } // TODO: Check disk requirements // Since we have the required resources - let's check if tenancy allows us to expand them to // run more tenants here - if node_usage.IsEmpty() { + if nodeUsage.IsEmpty() { // In case we dealing with the first one - we need to set usage modificators, otherwise // those values will mess up the next calculations - node_usage.Multitenancy = req.Resources.Multitenancy - node_usage.CpuOverbook = req.Resources.CpuOverbook - node_usage.RamOverbook = req.Resources.RamOverbook + nodeUsage.Multitenancy = req.Resources.Multitenancy + nodeUsage.CpuOverbook = req.Resources.CpuOverbook + nodeUsage.RamOverbook = req.Resources.RamOverbook } - if node_usage.Multitenancy && req.Resources.Multitenancy { + if nodeUsage.Multitenancy && req.Resources.Multitenancy { // Ok we can run more tenants, let's calculate how much - if node_usage.CpuOverbook && req.Resources.CpuOverbook { - avail_cpu += d.cfg.CpuOverbook + if nodeUsage.CpuOverbook && req.Resources.CpuOverbook { + availCpu += d.cfg.CpuOverbook } - if node_usage.RamOverbook && req.Resources.RamOverbook { - avail_ram += d.cfg.RamOverbook + if nodeUsage.RamOverbook && req.Resources.RamOverbook { + availRam += d.cfg.RamOverbook } } // Calculate how 
much of those definitions we could run - out_count = int64((avail_cpu - node_usage.Cpu) / req.Resources.Cpu) - ram_count := int64((avail_ram - node_usage.Ram) / req.Resources.Ram) - if out_count > ram_count { - out_count = ram_count + outCount = int64((availCpu - nodeUsage.Cpu) / req.Resources.Cpu) + ramCount := int64((availRam - nodeUsage.Ram) / req.Resources.Ram) + if outCount > ramCount { + outCount = ramCount } // TODO: Add disks into equation - return out_count + return outCount } /** @@ -179,8 +179,8 @@ func (d *Driver) AvailableCapacity(node_usage types.Resources, req types.LabelDe func (d *Driver) Allocate(def types.LabelDefinition, metadata map[string]any) (*types.Resource, error) { if d.cfg.IsRemote { // It's remote so let's use docker_usage to store modificators properly - d.docker_usage_mutex.Lock() - defer d.docker_usage_mutex.Unlock() + d.dockerUsageMutex.Lock() + defer d.dockerUsageMutex.Unlock() } var opts Options if err := opts.Apply(def.Options); err != nil { @@ -190,8 +190,8 @@ func (d *Driver) Allocate(def types.LabelDefinition, metadata map[string]any) (* // Generate unique id from the hw address and required directories buf := crypt.RandBytes(6) buf[0] = (buf[0] | 2) & 0xfe // Set local bit, ensure unicast address - c_hwaddr := fmt.Sprintf("%02x:%02x:%02x:%02x:%02x:%02x", buf[0], buf[1], buf[2], buf[3], buf[4], buf[5]) - c_name := d.getContainerName(c_hwaddr) + cHwaddr := fmt.Sprintf("%02x:%02x:%02x:%02x:%02x:%02x", buf[0], buf[1], buf[2], buf[3], buf[4], buf[5]) + cName := d.getContainerName(cHwaddr) // Create the docker network // TODO: For now hostonly is only works properly (allows access to host @@ -199,66 +199,66 @@ func (d *Driver) Allocate(def types.LabelDefinition, metadata map[string]any) (* // have the separated container `hostonly` which allows only // host.docker.internal access, but others to drop and to use it as // `--net container:hostonly` in other containers in the future. 
- c_network := def.Resources.Network - if c_network == "" { - c_network = "hostonly" - } - if !d.isNetworkExists(c_network) { - net_args := []string{"network", "create", "-d", "bridge"} - if c_network == "hostonly" { - net_args = append(net_args, "--internal") + cNetwork := def.Resources.Network + if cNetwork == "" { + cNetwork = "hostonly" + } + if !d.isNetworkExists(cNetwork) { + netArgs := []string{"network", "create", "-d", "bridge"} + if cNetwork == "hostonly" { + netArgs = append(netArgs, "--internal") } - net_args = append(net_args, "aquarium-"+c_network) - if _, _, err := runAndLog(5*time.Second, d.cfg.DockerPath, net_args...); err != nil { + netArgs = append(netArgs, "aquarium-"+cNetwork) + if _, _, err := runAndLog(5*time.Second, d.cfg.DockerPath, netArgs...); err != nil { return nil, err } } // Load the images - img_name_version, err := d.loadImages(&opts) + imgNameVersion, err := d.loadImages(&opts) if err != nil { return nil, err } // Set the arguments to run the container - run_args := []string{"run", "--detach", - "--name", c_name, - "--mac-address", c_hwaddr, - "--network", "aquarium-" + c_network, + runArgs := []string{"run", "--detach", + "--name", cName, + "--mac-address", cHwaddr, + "--network", "aquarium-" + cNetwork, "--cpus", fmt.Sprintf("%d", def.Resources.Cpu), "--memory", fmt.Sprintf("%dg", def.Resources.Ram), "--pull", "never", } // Create and connect volumes to container - if err := d.disksCreate(c_name, &run_args, def.Resources.Disks); err != nil { + if err := d.disksCreate(cName, &runArgs, def.Resources.Disks); err != nil { return nil, log.Error("Docker: Unable to create the required disks:", err) } // Create env file - env_path, err := d.envCreate(c_name, metadata) + envPath, err := d.envCreate(cName, metadata) if err != nil { return nil, log.Error("Docker: Unable to create the env file:", err) } // Add env-file to run args - run_args = append(run_args, "--env-file", env_path) + runArgs = append(runArgs, "--env-file", envPath) // 
Deleting the env file when container is running to keep secrets - defer os.Remove(env_path) + defer os.Remove(envPath) // Run the container - run_args = append(run_args, img_name_version) - if _, _, err := runAndLog(30*time.Second, d.cfg.DockerPath, run_args...); err != nil { - return nil, log.Error("Docker: Unable to run container", c_name, err) + runArgs = append(runArgs, imgNameVersion) + if _, _, err := runAndLog(30*time.Second, d.cfg.DockerPath, runArgs...); err != nil { + return nil, log.Error("Docker: Unable to run container", cName, err) } if d.cfg.IsRemote { // Locked in the beginning of the function - d.docker_usage.Add(def.Resources) + d.dockerUsage.Add(def.Resources) } - log.Info("Docker: Allocate of Container completed:", c_hwaddr, c_name) + log.Info("Docker: Allocate of Container completed:", cHwaddr, cName) - return &types.Resource{Identifier: c_name, HwAddr: c_hwaddr}, nil + return &types.Resource{Identifier: cName, HwAddr: cHwaddr}, nil } func (d *Driver) Status(res *types.Resource) (string, error) { @@ -274,7 +274,7 @@ func (d *Driver) Status(res *types.Resource) (string, error) { func (d *Driver) GetTask(name, options string) drivers.ResourceDriverTask { // Look for the specified task name var t drivers.ResourceDriverTask - for _, task := range d.tasks_list { + for _, task := range d.tasksList { if task.Name() == name { t = task.Clone() } @@ -297,65 +297,65 @@ func (d *Driver) Deallocate(res *types.Resource) error { } if d.cfg.IsRemote { // It's remote so let's use docker_usage to store modificators properly - d.docker_usage_mutex.Lock() - defer d.docker_usage_mutex.Unlock() + d.dockerUsageMutex.Lock() + defer d.dockerUsageMutex.Unlock() } - c_name := d.getContainerName(res.Identifier) - c_id := d.getAllocatedContainerId(res.Identifier) - if len(c_id) == 0 { + cName := d.getContainerName(res.Identifier) + cId := d.getAllocatedContainerId(res.Identifier) + if len(cId) == 0 { return log.Error("Docker: Unable to find container with identifier:", 
res.Identifier) } // Getting the mounted volumes stdout, _, err := runAndLog(5*time.Second, d.cfg.DockerPath, "inspect", - "--format", "{{ range .Mounts }}{{ println .Source }}{{ end }}", c_id, + "--format", "{{ range .Mounts }}{{ println .Source }}{{ end }}", cId, ) if err != nil { - return log.Error("Docker: Unable to inspect the container:", c_name, err) + return log.Error("Docker: Unable to inspect the container:", cName, err) } - c_volumes := strings.Split(strings.TrimSpace(stdout), "\n") + cVolumes := strings.Split(strings.TrimSpace(stdout), "\n") if d.cfg.IsRemote { // Get the container CPU/RAM to subtract from the docker_usage - res, err := d.getContainersResources([]string{c_id}) + res, err := d.getContainersResources([]string{cId}) if err != nil { - return log.Error("Docker: Unable to collect the container resources:", c_name, err) + return log.Error("Docker: Unable to collect the container resources:", cName, err) } // Locked in the beginning of the function - d.docker_usage.Subtract(res) + d.dockerUsage.Subtract(res) } // Stop the container - if _, _, err := runAndLogRetry(3, 10*time.Second, d.cfg.DockerPath, "stop", c_id); err != nil { - return log.Error("Docker: Unable to stop the container:", c_name, err) + if _, _, err := runAndLogRetry(3, 10*time.Second, d.cfg.DockerPath, "stop", cId); err != nil { + return log.Error("Docker: Unable to stop the container:", cName, err) } // Remove the container - if _, _, err := runAndLog(5*time.Second, d.cfg.DockerPath, "rm", c_id); err != nil { - return log.Error("Docker: Unable to remove the container:", c_name, err) + if _, _, err := runAndLog(5*time.Second, d.cfg.DockerPath, "rm", cId); err != nil { + return log.Error("Docker: Unable to remove the container:", cName, err) } // Umount the disk volumes if needed mounts, _, err := runAndLog(3*time.Second, "/sbin/mount") if err != nil { - return log.Error("Docker: Unable to list the mount points:", c_name, err) + return log.Error("Docker: Unable to list the mount 
points:", cName, err) } - for _, vol_path := range c_volumes { - if strings.Contains(mounts, vol_path) { - if _, _, err := runAndLog(5*time.Second, "/usr/bin/hdiutil", "detach", vol_path); err != nil { - return log.Error("Docker: Unable to detach the volume disk:", c_name, vol_path, err) + for _, volPath := range cVolumes { + if strings.Contains(mounts, volPath) { + if _, _, err := runAndLog(5*time.Second, "/usr/bin/hdiutil", "detach", volPath); err != nil { + return log.Error("Docker: Unable to detach the volume disk:", cName, volPath, err) } } } // Cleaning the container work directory with non-reuse disks - c_workspace_path := filepath.Join(d.cfg.WorkspacePath, c_name) - if _, err := os.Stat(c_workspace_path); !os.IsNotExist(err) { - if err := os.RemoveAll(c_workspace_path); err != nil { + cWorkspacePath := filepath.Join(d.cfg.WorkspacePath, cName) + if _, err := os.Stat(cWorkspacePath); !os.IsNotExist(err) { + if err := os.RemoveAll(cWorkspacePath); err != nil { return err } } - log.Info("Docker: Deallocate of Container completed:", res.Identifier, c_name) + log.Info("Docker: Deallocate of Container completed:", res.Identifier, cName) return nil } diff --git a/lib/drivers/docker/options.go b/lib/drivers/docker/options.go index d378417..8d6fcc9 100644 --- a/lib/drivers/docker/options.go +++ b/lib/drivers/docker/options.go @@ -44,14 +44,14 @@ func (o *Options) Apply(options util.UnparsedJson) error { func (o *Options) Validate() error { // Check images - var img_err error + var imgErr error for index := range o.Images { if err := o.Images[index].Validate(); err != nil { - img_err = log.Error("Docker: Error during image validation:", err) + imgErr = log.Error("Docker: Error during image validation:", err) } } - if img_err != nil { - return img_err + if imgErr != nil { + return imgErr } return nil diff --git a/lib/drivers/docker/util.go b/lib/drivers/docker/util.go index 47054b5..2856fb2 100644 --- a/lib/drivers/docker/util.go +++ b/lib/drivers/docker/util.go @@ 
-30,36 +30,36 @@ import ( "github.com/adobe/aquarium-fish/lib/openapi/types" ) -func (d *Driver) getContainersResources(container_ids []string) (types.Resources, error) { +func (d *Driver) getContainersResources(containerIds []string) (types.Resources, error) { var out types.Resources // Getting current running containers info - will return ",\n..." for each one - docker_args := []string{"inspect", "--format", "{{ .HostConfig.NanoCpus }},{{ .HostConfig.Memory }}"} - docker_args = append(docker_args, container_ids...) - stdout, _, err := runAndLog(5*time.Second, d.cfg.DockerPath, docker_args...) + dockerArgs := []string{"inspect", "--format", "{{ .HostConfig.NanoCpus }},{{ .HostConfig.Memory }}"} + dockerArgs = append(dockerArgs, containerIds...) + stdout, _, err := runAndLog(5*time.Second, d.cfg.DockerPath, dockerArgs...) if err != nil { return out, fmt.Errorf("Docker: Unable to inspect the containers to get used resources: %v", err) } - res_list := strings.Split(strings.TrimSpace(stdout), "\n") - for _, res := range res_list { - cpu_mem := strings.Split(res, ",") - if len(cpu_mem) < 2 { - return out, fmt.Errorf("Docker: Not enough info values in return: %q", res_list) + resList := strings.Split(strings.TrimSpace(stdout), "\n") + for _, res := range resList { + cpuMem := strings.Split(res, ",") + if len(cpuMem) < 2 { + return out, fmt.Errorf("Docker: Not enough info values in return: %q", resList) } - res_cpu, err := strconv.ParseUint(cpu_mem[0], 10, 64) + resCpu, err := strconv.ParseUint(cpuMem[0], 10, 64) if err != nil { - return out, fmt.Errorf("Docker: Unable to parse CPU uint: %v (%q)", err, cpu_mem[0]) + return out, fmt.Errorf("Docker: Unable to parse CPU uint: %v (%q)", err, cpuMem[0]) } - res_ram, err := strconv.ParseUint(cpu_mem[1], 10, 64) + resRam, err := strconv.ParseUint(cpuMem[1], 10, 64) if err != nil { - return out, fmt.Errorf("Docker: Unable to parse RAM uint: %v (%q)", err, cpu_mem[1]) + return out, fmt.Errorf("Docker: Unable to parse RAM uint: %v 
(%q)", err, cpuMem[1]) } - if res_cpu == 0 || res_ram == 0 { - return out, fmt.Errorf("Docker: The container is non-Fish controlled zero-cpu/ram ones: %q", container_ids) + if resCpu == 0 || resRam == 0 { + return out, fmt.Errorf("Docker: The container is non-Fish controlled zero-cpu/ram ones: %q", containerIds) } - out.Cpu += uint(res_cpu / 1000000000) // Originallly in NCPU - out.Ram += uint(res_ram / 1073741824) // Get in GB + out.Cpu += uint(resCpu / 1000000000) // Originally in NCPU + out.Ram += uint(resRam / 1073741824) // Get in GB // TODO: Add disks too here } @@ -77,31 +77,31 @@ func (d *Driver) getInitialUsage() (types.Resources, error) { return out, fmt.Errorf("Docker: Unable to list the running containers: %v", err) } - ids_list := strings.Split(strings.TrimSpace(stdout), "\n") - if len(ids_list) == 1 && ids_list[0] == "" { + idsList := strings.Split(strings.TrimSpace(stdout), "\n") + if len(idsList) == 1 && idsList[0] == "" { // It's actually empty so skip it return out, nil } - out, err = d.getContainersResources(ids_list) + out, err = d.getContainersResources(idsList) if err != nil { return out, err } - if out.IsEmpty() || len(ids_list) == 1 { + if out.IsEmpty() || len(idsList) == 1 { // There is no or one container is allocated - so for safety use false for modifiers return out, nil } // Let's try to find the modificators that is used - if len(ids_list) > 1 { + if len(idsList) > 1 { // There is more than one container is running so multitenancy is true out.Multitenancy = true } - if out.Cpu > d.total_cpu { + if out.Cpu > d.totalCpu { out.CpuOverbook = true } - if out.Ram > d.total_ram { + if out.Ram > d.totalRam { out.RamOverbook = true } @@ -109,17 +109,17 @@ func (d *Driver) getInitialUsage() (types.Resources, error) { } // Collects the available resource with alteration -func (d *Driver) getAvailResources() (avail_cpu, avail_ram uint) { +func (d *Driver) getAvailResources() (availCpu, availRam uint) { if d.cfg.CpuAlter < 0 { - avail_cpu =
d.total_cpu - uint(-d.cfg.CpuAlter) + availCpu = d.totalCpu - uint(-d.cfg.CpuAlter) } else { - avail_cpu = d.total_cpu + uint(d.cfg.CpuAlter) + availCpu = d.totalCpu + uint(d.cfg.CpuAlter) } if d.cfg.RamAlter < 0 { - avail_ram = d.total_ram - uint(-d.cfg.RamAlter) + availRam = d.totalRam - uint(-d.cfg.RamAlter) } else { - avail_ram = d.total_ram + uint(d.cfg.RamAlter) + availRam = d.totalRam + uint(d.cfg.RamAlter) } return @@ -155,23 +155,23 @@ func (d *Driver) loadImages(opts *Options) (string, error) { // They needed to be processed sequentially because the childs does not // contains the parent's layers so parents should be loaded first - target_out := "" - var loaded_images []string - for image_index, image := range opts.Images { - image_unpacked := filepath.Join(d.cfg.ImagesPath, image.Name+"-"+image.Version) + targetOut := "" + var loadedImages []string + for imageIndex, image := range opts.Images { + imageUnpacked := filepath.Join(d.cfg.ImagesPath, image.Name+"-"+image.Version) // Getting the image subdir name in the unpacked dir subdir := "" - items, err := os.ReadDir(image_unpacked) + items, err := os.ReadDir(imageUnpacked) if err != nil { - log.Error("Docker: Unable to read the unpacked directory:", image_unpacked, err) + log.Error("Docker: Unable to read the unpacked directory:", imageUnpacked, err) return "", fmt.Errorf("Docker: The image was unpacked incorrectly, please check log for the errors") } for _, f := range items { if strings.HasPrefix(f.Name(), image.Name) { if f.Type()&fs.ModeSymlink != 0 { // Potentially it can be a symlink (like used in local tests) - if _, err := os.Stat(filepath.Join(image_unpacked, f.Name())); err != nil { + if _, err := os.Stat(filepath.Join(imageUnpacked, f.Name())); err != nil { log.Warn("Docker: The image symlink is broken:", f.Name(), err) continue } @@ -181,61 +181,61 @@ func (d *Driver) loadImages(opts *Options) (string, error) { } } if subdir == "" { - log.Errorf("Docker: Unpacked image '%s' has no subfolder 
'%s', only: %q", image_unpacked, image.Name, items) + log.Errorf("Docker: Unpacked image '%s' has no subfolder '%s', only: %q", imageUnpacked, image.Name, items) return "", fmt.Errorf("Docker: The image was unpacked incorrectly, please check log for the errors") } // Optimization to check if the image exists and not load it again - subdir_ver_end := strings.LastIndexByte(subdir, '_') - if subdir_ver_end > 0 { - image_found := "" + subdirVerEnd := strings.LastIndexByte(subdir, '_') + if subdirVerEnd > 0 { + imageFound := "" // Search the image by image ID prefix and list the image tags - image_tags, _, err := runAndLog(5*time.Second, d.cfg.DockerPath, "image", "inspect", - fmt.Sprintf("sha256:%s", subdir[subdir_ver_end+1:]), + imageTags, _, err := runAndLog(5*time.Second, d.cfg.DockerPath, "image", "inspect", + fmt.Sprintf("sha256:%s", subdir[subdirVerEnd+1:]), "--format", "{{ range .RepoTags }}{{ println . }}{{ end }}", ) if err == nil { // The image could contain a number of tags so check them all - found_images := strings.Split(strings.TrimSpace(image_tags), "\n") - for _, tag := range found_images { + foundImages := strings.Split(strings.TrimSpace(imageTags), "\n") + for _, tag := range foundImages { if strings.HasSuffix(strings.Replace(tag, ":", "-", 1), subdir) { - image_found = tag - loaded_images = append(loaded_images, image_found) + imageFound = tag + loadedImages = append(loadedImages, imageFound) // If it's the last image then it's the target one - if image_index+1 == len(opts.Images) { - target_out = image_found + if imageIndex+1 == len(opts.Images) { + targetOut = imageFound } break } } } - if image_found != "" { - log.Debug("Docker: The image was found in the local docker registry:", image_found) + if imageFound != "" { + log.Debug("Docker: The image was found in the local docker registry:", imageFound) continue } } // Load the docker image // sha256 prefix the same - image_archive := filepath.Join(image_unpacked, subdir, image.Name+".tar") - stdout, 
_, err := runAndLog(5*time.Minute, d.cfg.DockerPath, "image", "load", "-q", "-i", image_archive) + imageArchive := filepath.Join(imageUnpacked, subdir, image.Name+".tar") + stdout, _, err := runAndLog(5*time.Minute, d.cfg.DockerPath, "image", "load", "-q", "-i", imageArchive) if err != nil { - log.Error("Docker: Unable to load the image:", image_archive, err) + log.Error("Docker: Unable to load the image:", imageArchive, err) return "", fmt.Errorf("Docker: The image was unpacked incorrectly, please check log for the errors") } for _, line := range strings.Split(stdout, "\n") { if !strings.HasPrefix(line, "Loaded image: ") { continue } - image_name_version := strings.Split(line, ": ")[1] + imageNameVersion := strings.Split(line, ": ")[1] - loaded_images = append(loaded_images, image_name_version) + loadedImages = append(loadedImages, imageNameVersion) // If it's the last image then it's the target one - if image_index+1 == len(opts.Images) { - target_out = image_name_version + if imageIndex+1 == len(opts.Images) { + targetOut = imageNameVersion } break } @@ -244,17 +244,17 @@ func (d *Driver) loadImages(opts *Options) (string, error) { log.Info("Docker: All the images are processed.") // Check all the images are in place just by number of them - if len(opts.Images) != len(loaded_images) { - return "", log.Errorf("Docker: Not all the images are ok (%d out of %d), please check log for the errors", len(loaded_images), len(opts.Images)) + if len(opts.Images) != len(loadedImages) { + return "", log.Errorf("Docker: Not all the images are ok (%d out of %d), please check log for the errors", len(loadedImages), len(opts.Images)) } - return target_out, nil + return targetOut, nil } // Receives the container ID out of the container name -func (d *Driver) getAllocatedContainerId(c_name string) string { +func (d *Driver) getAllocatedContainerId(cName string) string { // Probably it's better to store the current list in the memory - stdout, _, err := runAndLog(5*time.Second, 
d.cfg.DockerPath, "ps", "-a", "-q", "--filter", "name="+c_name) + stdout, _, err := runAndLog(5*time.Second, d.cfg.DockerPath, "ps", "-a", "-q", "--filter", "name="+cName) if err != nil { return "" } @@ -274,16 +274,16 @@ func (d *Driver) isNetworkExists(name string) bool { } // Creates disks directories described by the disks map -func (d *Driver) disksCreate(c_name string, run_args *[]string, disks map[string]types.ResourcesDisk) error { +func (d *Driver) disksCreate(cName string, runArgs *[]string, disks map[string]types.ResourcesDisk) error { // Create disks - disk_paths := make(map[string]string, len(disks)) + diskPaths := make(map[string]string, len(disks)) - for d_name, disk := range disks { - disk_path := filepath.Join(d.cfg.WorkspacePath, c_name, "disk-"+d_name) + for dName, disk := range disks { + diskPath := filepath.Join(d.cfg.WorkspacePath, cName, "disk-"+dName) if disk.Reuse { - disk_path = filepath.Join(d.cfg.WorkspacePath, "disk-"+d_name) + diskPath = filepath.Join(d.cfg.WorkspacePath, "disk-"+dName) } - if err := os.MkdirAll(filepath.Dir(disk_path), 0o755); err != nil { + if err := os.MkdirAll(filepath.Dir(diskPath), 0o755); err != nil { return err } @@ -292,18 +292,18 @@ func (d *Driver) disksCreate(c_name string, run_args *[]string, disks map[string // TODO: Ensure failures doesn't leave the changes behind (like mounted disks or files) if disk.Type == "dir" { - if err := os.MkdirAll(disk_path, 0o777); err != nil { + if err := os.MkdirAll(diskPath, 0o777); err != nil { return err } - disk_paths[disk_path] = disk.Label + diskPaths[diskPath] = disk.Label // TODO: Validate the available disk space for disk.Size continue } // Create virtual disk in order to restrict the disk space - dmg_path := disk_path + ".dmg" + dmgPath := diskPath + ".dmg" - label := d_name + label := dName if disk.Label != "" { // Label can be used as mount point so cut the path separator out label = strings.ReplaceAll(disk.Label, "/", "") @@ -312,67 +312,67 @@ func (d *Driver) 
disksCreate(c_name string, run_args *[]string, disks map[string } // Do not recreate the disk if it is exists - if _, err := os.Stat(dmg_path); os.IsNotExist(err) { - var disk_type string + if _, err := os.Stat(dmgPath); os.IsNotExist(err) { + var diskType string switch disk.Type { case "hfs+": - disk_type = "HFS+" + diskType = "HFS+" case "fat32": - disk_type = "FAT32" + diskType = "FAT32" default: - disk_type = "ExFAT" + diskType = "ExFAT" } - args := []string{"create", dmg_path, - "-fs", disk_type, + args := []string{"create", dmgPath, + "-fs", diskType, "-layout", "NONE", "-volname", label, "-size", fmt.Sprintf("%dm", disk.Size*1024), } if _, _, err := runAndLog(10*time.Minute, "/usr/bin/hdiutil", args...); err != nil { - return log.Error("Docker: Unable to create dmg disk:", dmg_path, err) + return log.Error("Docker: Unable to create dmg disk:", dmgPath, err) } } - mount_point := filepath.Join("/Volumes", fmt.Sprintf("%s-%s", c_name, d_name)) + mountPoint := filepath.Join("/Volumes", fmt.Sprintf("%s-%s", cName, dName)) // Attach & mount disk - if _, _, err := runAndLog(10*time.Second, "/usr/bin/hdiutil", "attach", dmg_path, "-mountpoint", mount_point); err != nil { - return log.Error("Docker: Unable to attach dmg disk:", dmg_path, mount_point, err) + if _, _, err := runAndLog(10*time.Second, "/usr/bin/hdiutil", "attach", dmgPath, "-mountpoint", mountPoint); err != nil { + return log.Error("Docker: Unable to attach dmg disk:", dmgPath, mountPoint, err) } // Allow anyone to modify the disk content - if err := os.Chmod(mount_point, 0o777); err != nil { - return log.Error("Docker: Unable to change the disk access rights:", mount_point, err) + if err := os.Chmod(mountPoint, 0o777); err != nil { + return log.Error("Docker: Unable to change the disk access rights:", mountPoint, err) } - disk_paths[mount_point] = disk.Label + diskPaths[mountPoint] = disk.Label } - if len(disk_paths) == 0 { + if len(diskPaths) == 0 { return nil } // Connect disk files to container via 
cmd - for mount_path, mount_point := range disk_paths { + for mountPath, mountPoint := range diskPaths { // If the label is not an absolute path than use mnt dir - if !strings.HasPrefix(mount_point, "/") { - mount_point = filepath.Join("/mnt", mount_point) + if !strings.HasPrefix(mountPoint, "/") { + mountPoint = filepath.Join("/mnt", mountPoint) } - *run_args = append(*run_args, "-v", fmt.Sprintf("%s:%s", mount_path, mount_point)) + *runArgs = append(*runArgs, "-v", fmt.Sprintf("%s:%s", mountPath, mountPoint)) } return nil } // Creates the env file for the container out of metadata specification -func (d *Driver) envCreate(c_name string, metadata map[string]any) (string, error) { - env_file_path := filepath.Join(d.cfg.WorkspacePath, c_name, ".env") - if err := os.MkdirAll(filepath.Dir(env_file_path), 0o755); err != nil { - return "", log.Error("Docker: Unable to create the container directory:", filepath.Dir(env_file_path), err) +func (d *Driver) envCreate(cName string, metadata map[string]any) (string, error) { + envFilePath := filepath.Join(d.cfg.WorkspacePath, cName, ".env") + if err := os.MkdirAll(filepath.Dir(envFilePath), 0o755); err != nil { + return "", log.Error("Docker: Unable to create the container directory:", filepath.Dir(envFilePath), err) } - fd, err := os.OpenFile(env_file_path, os.O_WRONLY|os.O_CREATE, 0o640) + fd, err := os.OpenFile(envFilePath, os.O_WRONLY|os.O_CREATE, 0o640) if err != nil { - return "", log.Error("Docker: Unable to create env file:", env_file_path, err) + return "", log.Error("Docker: Unable to create env file:", envFilePath, err) } defer fd.Close() @@ -380,11 +380,11 @@ func (d *Driver) envCreate(c_name string, metadata map[string]any) (string, erro for key, value := range metadata { data := []byte(fmt.Sprintf("%s=%s\n", key, value)) if _, err := fd.Write(data); err != nil { - return "", log.Error("Docker: Unable to write env file data:", env_file_path, err) + return "", log.Error("Docker: Unable to write env file data:", 
envFilePath, err) } } - return env_file_path, nil + return envFilePath, nil } // Runs & logs the executable command diff --git a/lib/drivers/driver.go b/lib/drivers/driver.go index fe137dc..1100da9 100644 --- a/lib/drivers/driver.go +++ b/lib/drivers/driver.go @@ -52,7 +52,7 @@ type ResourceDriver interface { // -> node_usage - how much of node resources was used by all the drivers. Usually should not be used by the cloud drivers // -> req - definition describes requirements for the resource // <- capacity - the number of such definitions the driver could run, if -1 - error happened - AvailableCapacity(node_usage types.Resources, req types.LabelDefinition) (capacity int64) + AvailableCapacity(nodeUsage types.Resources, req types.LabelDefinition) (capacity int64) // Allocate the resource by definition and returns hw address // -> def - describes the driver options to allocate the required resource diff --git a/lib/drivers/image.go b/lib/drivers/image.go index a3749ce..505af43 100644 --- a/lib/drivers/image.go +++ b/lib/drivers/image.go @@ -60,17 +60,17 @@ func (i *Image) Validate() error { // Fill name out of image url if i.Name == "" { i.Name = path.Base(i.Url) - minus_loc := strings.LastIndexByte(i.Name, '-') - if minus_loc != -1 { + minusLoc := strings.LastIndexByte(i.Name, '-') + if minusLoc != -1 { // Use the part from beginning to last minus ('-') - useful to separate version part - i.Name = i.Name[0:minus_loc] + i.Name = i.Name[0:minusLoc] } else if strings.LastIndexByte(i.Name, '.') != -1 { // Split by extension - need to take into account dual extension of tar archives (ex. 
".tar.xz") - name_split := strings.Split(i.Name, ".") - if name_split[len(name_split)-2] == "tar" { - i.Name = strings.Join(name_split[0:len(name_split)-2], ".") + nameSplit := strings.Split(i.Name, ".") + if nameSplit[len(nameSplit)-2] == "tar" { + i.Name = strings.Join(nameSplit[0:len(nameSplit)-2], ".") } else { - i.Name = strings.Join(name_split[0:len(name_split)-1], ".") + i.Name = strings.Join(nameSplit[0:len(nameSplit)-1], ".") } } } @@ -78,29 +78,29 @@ func (i *Image) Validate() error { // Fill version out of image url if i.Version == "" { i.Version = path.Base(i.Url) - minus_loc := strings.LastIndexByte(i.Version, '-') - if minus_loc != -1 { + minusLoc := strings.LastIndexByte(i.Version, '-') + if minusLoc != -1 { // Use the part from the last minus ('-') to the end - i.Version = i.Version[minus_loc+1:] + i.Version = i.Version[minusLoc+1:] } if strings.LastIndexByte(i.Version, '.') != -1 { // Split by extension - need to take into account dual extension of tar archives (ex. ".tar.xz") - version_split := strings.Split(i.Version, ".") - if version_split[len(version_split)-2] == "tar" { - i.Version = strings.Join(version_split[0:len(version_split)-2], ".") + versionSplit := strings.Split(i.Version, ".") + if versionSplit[len(versionSplit)-2] == "tar" { + i.Version = strings.Join(versionSplit[0:len(versionSplit)-2], ".") } else { - i.Version = strings.Join(version_split[0:len(version_split)-1], ".") + i.Version = strings.Join(versionSplit[0:len(versionSplit)-1], ".") } } } // Check sum format if i.Sum != "" { - sum_split := strings.SplitN(i.Sum, ":", 2) - if len(i.Sum) > 0 && len(sum_split) != 2 { + sumSplit := strings.SplitN(i.Sum, ":", 2) + if len(i.Sum) > 0 && len(sumSplit) != 2 { return fmt.Errorf("Image: Checksum should be in format ':': %q", i.Sum) } - algo := sum_split[0] + algo := sumSplit[0] if algo != "md5" && algo != "sha1" && algo != "sha256" && algo != "sha512" { return fmt.Errorf("Image: Checksum with not supported algorithm (md5, sha1, sha256, 
sha512): %q", algo) } @@ -116,28 +116,28 @@ func (i *Image) Validate() error { // quick as possible. // -> out_dir - is the directory where the image will be placed. It will be unpacked to out_dir/Name-Version/ // -> user, password - credentials for HTTP Basic auth -func (i *Image) DownloadUnpack(out_dir, user, password string) error { - img_path := filepath.Join(out_dir, i.Name+"-"+i.Version) - log.Debug("Image: Downloading & Unpacking image:", i.Url, img_path) - lock_path := img_path + ".lock" +func (i *Image) DownloadUnpack(outDir, user, password string) error { + imgPath := filepath.Join(outDir, i.Name+"-"+i.Version) + log.Debug("Image: Downloading & Unpacking image:", i.Url, imgPath) + lockPath := imgPath + ".lock" // Wait for another process to download and unpack the archive // In case it failed to download - will be redownloaded further - util.WaitLock(lock_path, func() { - log.Debug("Util: Cleaning the abandoned files and begin redownloading:", img_path) - os.RemoveAll(img_path) + util.WaitLock(lockPath, func() { + log.Debug("Util: Cleaning the abandoned files and begin redownloading:", imgPath) + os.RemoveAll(imgPath) }) - if _, err := os.Stat(img_path); !os.IsNotExist(err) { + if _, err := os.Stat(imgPath); !os.IsNotExist(err) { // The unpacked archive is already here, so nothing to do return nil } // Creating lock file in order to not screw it up in multiprocess system - if err := util.CreateLock(lock_path); err != nil { + if err := util.CreateLock(lockPath); err != nil { return fmt.Errorf("Util: Unable to create lock file: %v", err) } - defer os.Remove(lock_path) + defer os.Remove(lockPath) client := &http.Client{} req, _ := http.NewRequestWithContext(context.TODO(), http.MethodGet, i.Url, nil) @@ -146,34 +146,34 @@ func (i *Image) DownloadUnpack(out_dir, user, password string) error { } resp, err := client.Do(req) if err != nil { - os.RemoveAll(img_path) + os.RemoveAll(imgPath) return fmt.Errorf("Image: Unable to request url %q: %v", i.Url, err) } 
defer resp.Body.Close() if resp.StatusCode != http.StatusOK { - os.RemoveAll(img_path) + os.RemoveAll(imgPath) return fmt.Errorf("Image: Unable to download file %q: %s", i.Url, resp.Status) } // Printing the download progress bodypt := &util.PassThruMonitor{ Reader: resp.Body, - Name: fmt.Sprintf("Image: Downloading '%s'", img_path), + Name: fmt.Sprintf("Image: Downloading '%s'", imgPath), Length: resp.ContentLength, } // Process checksum - var data_reader io.Reader + var dataReader io.Reader var hasher hash.Hash if i.Sum == "" { // Just use the passthrough body as the source - data_reader = bodypt + dataReader = bodypt } else { - algo_sum := strings.SplitN(i.Sum, ":", 2) + algoSum := strings.SplitN(i.Sum, ":", 2) // Calculating checksum during reading from the body - switch algo_sum[0] { + switch algoSum[0] { case "md5": hasher = md5.New() // #nosec G401 case "sha1": @@ -183,29 +183,29 @@ func (i *Image) DownloadUnpack(out_dir, user, password string) error { case "sha512": hasher = sha512.New() default: - os.RemoveAll(img_path) - return fmt.Errorf("Image: Not recognized checksum algorithm (md5, sha1, sha256, sha512): %q", algo_sum[0]) + os.RemoveAll(imgPath) + return fmt.Errorf("Image: Not recognized checksum algorithm (md5, sha1, sha256, sha512): %q", algoSum[0]) } - data_reader = io.TeeReader(bodypt, hasher) + dataReader = io.TeeReader(bodypt, hasher) // Check if headers contains the needed algo:hash for quick validation // We're not completely trust the server, but if it returns the wrong sum - we're dropping. 
// Header should look like: X-Checksum-Md5 X-Checksum-Sha1 X-Checksum-Sha256 (Artifactory) - if remote_sum := resp.Header.Get("X-Checksum-" + strings.Title(algo_sum[0])); remote_sum != "" { //nolint:staticcheck // SA1019 Strictly ASCII here + if remoteSum := resp.Header.Get("X-Checksum-" + strings.Title(algoSum[0])); remoteSum != "" { //nolint:staticcheck // SA1019 Strictly ASCII here // Server returned mathing header, so compare it's value to our checksum - if remote_sum != algo_sum[1] { - os.RemoveAll(img_path) + if remoteSum != algoSum[1] { + os.RemoveAll(imgPath) return fmt.Errorf("Image: The remote checksum (from header X-Checksum-%s) doesn't equal the desired one: %q != %q for %q", - strings.Title(algo_sum[0]), remote_sum, algo_sum[1], i.Url) //nolint:staticcheck // SA1019 Strictly ASCII here + strings.Title(algoSum[0]), remoteSum, algoSum[1], i.Url) //nolint:staticcheck // SA1019 Strictly ASCII here } } } // Unpack the stream - xzr, err := xz.NewReader(data_reader) + xzr, err := xz.NewReader(dataReader) if err != nil { - os.RemoveAll(img_path) + os.RemoveAll(imgPath) return fmt.Errorf("Image: Unable to create XZ reader: %v", err) } @@ -220,37 +220,37 @@ func (i *Image) DownloadUnpack(out_dir, user, password string) error { break } if err != nil { - os.RemoveAll(img_path) + os.RemoveAll(imgPath) return fmt.Errorf("Image: Tar archive failed to iterate next file: %v", err) } // Check the name doesn't contain any traversal elements if strings.Contains(hdr.Name, "..") { - os.RemoveAll(img_path) + os.RemoveAll(imgPath) return fmt.Errorf("Image: The archive filepath contains '..' 
which is security forbidden: %q", hdr.Name) } - target := filepath.Join(img_path, hdr.Name) // #nosec G305 , checked above + target := filepath.Join(imgPath, hdr.Name) // #nosec G305 , checked above switch hdr.Typeflag { case tar.TypeDir: // Create a directory err = os.MkdirAll(target, os.FileMode(hdr.Mode)) if err != nil { - os.RemoveAll(img_path) + os.RemoveAll(imgPath) return fmt.Errorf("Image: Unable to create directory %q: %v", target, err) } case tar.TypeReg: // Write a file - log.Debugf("Util: Extracting '%s': %s", img_path, hdr.Name) + log.Debugf("Util: Extracting '%s': %s", imgPath, hdr.Name) err = os.MkdirAll(filepath.Dir(target), 0750) if err != nil { - os.RemoveAll(img_path) + os.RemoveAll(imgPath) return fmt.Errorf("Image: Unable to create directory for file %q: %v", target, err) } w, err := os.OpenFile(target, os.O_CREATE|os.O_RDWR, os.FileMode(hdr.Mode)) if err != nil { - os.RemoveAll(img_path) + os.RemoveAll(imgPath) return fmt.Errorf("Image: Unable to open file %q for unpack: %v", target, err) } defer w.Close() @@ -263,7 +263,7 @@ func (i *Image) DownloadUnpack(out_dir, user, password string) error { } else if err == io.EOF { break } - os.RemoveAll(img_path) + os.RemoveAll(imgPath) return fmt.Errorf("Image: Unable to unpack content to file %q: %v", target, err) } } @@ -272,14 +272,14 @@ func (i *Image) DownloadUnpack(out_dir, user, password string) error { // Compare the calculated checksum to the desired one if i.Sum != "" { // Completing read of the stream to calculate the hash properly (tar will not do that) - io.ReadAll(data_reader) + io.ReadAll(dataReader) - algo_sum := strings.SplitN(i.Sum, ":", 2) - calculated_sum := hex.EncodeToString(hasher.Sum(nil)) - if calculated_sum != algo_sum[1] { - os.RemoveAll(img_path) + algoSum := strings.SplitN(i.Sum, ":", 2) + calculatedSum := hex.EncodeToString(hasher.Sum(nil)) + if calculatedSum != algoSum[1] { + os.RemoveAll(imgPath) return fmt.Errorf("Image: The calculated checksum doesn't equal the desired 
one: %q != %q for %q", - calculated_sum, algo_sum[1], i.Url) + calculatedSum, algoSum[1], i.Url) } } diff --git a/lib/drivers/image_test.go b/lib/drivers/image_test.go index 1389fc4..3b9c267 100644 --- a/lib/drivers/image_test.go +++ b/lib/drivers/image_test.go @@ -21,9 +21,9 @@ import ( ) // Data for download/unpack tests -const test_image_ci_sha256 = "48975fe7070f46788898e0729067424a8426ab27fe424500c5046f730d8e2ea5" -const test_image_ci_path = `/aquarium/image/test/test-image-ci/test-image-ci-20230210.190425_ff1cd1cf.tar.xz` -const test_image_ci_data = `/Td6WFoAAATm1rRGBMDGBICgASEBHAAAAAAAAHVrYcHgT/8CPl0AOhlKzh19/6MaQrMr9RrXZZWk9zwPnxjjpSvabgz3XRQs+H+dqotO+/DDO4qGxBjzRCfdCYPLz7PwgesGWM6q2rgpyOodGy/fE8D+r8dfs91GlyBovVJc6uZdtbJKrWVnv+jyvbxH55bmsGT0bdLORrG6rcmHQZ8tRr3WakelitUHoo5AljY6fq9RGvSgoeCNlE5bs0W/yJSaxs+Au5fHr1UjwqaqkdobRwtLiDIkjVWx2VutgHqhVR5xKl1ZW01bzOSQqt+Ahqt4HS6ODgp3HQmKNRuIlJa2ydxxdVlZCE6QFngbcp0dyOboWbUTTNi26roufISGmRD2ZIfdnufbPi2Uk8o20R0gaGtVRo64+kBqukRvG9qb1+WvQuCaiJyYAZ9fvf5wGGOzsNERBVvUU0nMK058oqujolnNSlxnugsHj6FNY5PYBzzu31mKfqUQV95/OzsUKfNp8gcWSOj3L8TIzkxB2Njwu5iCFQ96qFBPw/ArUWlxhhQIWKCIOCdsvD4lGP/Pdk8XbZJnjCMV0f8TqsuKUKSzXxCf++3kyJw700Rx4ry2bAPLs0/qxNIsJfhors/MW0B0RrL3p7nLxGlcBCtP3vZZvqSNhPMhG3outPyPlD/bvHLAnQtJTtjphyU7UazpkjcXslP+bSei2X7/t9D4kVqZgasnpEEBpTay5d+n/TKHv9FxLhZWq4mglUsZ7RyNIg2wdJzpe/fJ9SwkQPVxw0q/e21FObbGiwsELvSMPr80buV3ecFzAAAAAMTNLJ0ukWt/AAHiBICgAQCOEmNAscRn+wIAAAAABFla` +const testImageCiSha256 = "48975fe7070f46788898e0729067424a8426ab27fe424500c5046f730d8e2ea5" +const testImageCiPath = `/aquarium/image/test/test-image-ci/test-image-ci-20230210.190425_ff1cd1cf.tar.xz` +const testImageCiData = 
`/Td6WFoAAATm1rRGBMDGBICgASEBHAAAAAAAAHVrYcHgT/8CPl0AOhlKzh19/6MaQrMr9RrXZZWk9zwPnxjjpSvabgz3XRQs+H+dqotO+/DDO4qGxBjzRCfdCYPLz7PwgesGWM6q2rgpyOodGy/fE8D+r8dfs91GlyBovVJc6uZdtbJKrWVnv+jyvbxH55bmsGT0bdLORrG6rcmHQZ8tRr3WakelitUHoo5AljY6fq9RGvSgoeCNlE5bs0W/yJSaxs+Au5fHr1UjwqaqkdobRwtLiDIkjVWx2VutgHqhVR5xKl1ZW01bzOSQqt+Ahqt4HS6ODgp3HQmKNRuIlJa2ydxxdVlZCE6QFngbcp0dyOboWbUTTNi26roufISGmRD2ZIfdnufbPi2Uk8o20R0gaGtVRo64+kBqukRvG9qb1+WvQuCaiJyYAZ9fvf5wGGOzsNERBVvUU0nMK058oqujolnNSlxnugsHj6FNY5PYBzzu31mKfqUQV95/OzsUKfNp8gcWSOj3L8TIzkxB2Njwu5iCFQ96qFBPw/ArUWlxhhQIWKCIOCdsvD4lGP/Pdk8XbZJnjCMV0f8TqsuKUKSzXxCf++3kyJw700Rx4ry2bAPLs0/qxNIsJfhors/MW0B0RrL3p7nLxGlcBCtP3vZZvqSNhPMhG3outPyPlD/bvHLAnQtJTtjphyU7UazpkjcXslP+bSei2X7/t9D4kVqZgasnpEEBpTay5d+n/TKHv9FxLhZWq4mglUsZ7RyNIg2wdJzpe/fJ9SwkQPVxw0q/e21FObbGiwsELvSMPr80buV3ecFzAAAAAMTNLJ0ukWt/AAHiBICgAQCOEmNAscRn+wIAAAAABFla` var server *httptest.Server @@ -97,15 +97,15 @@ func Test_image_downloadunpack(t *testing.T) { } w.Header().Set("Content-Type", "application/octet-stream") if _, ok := r.URL.Query()["nosumheader"]; !ok { - w.Header().Set("X-Checksum-Sha256", test_image_ci_sha256) + w.Header().Set("X-Checksum-Sha256", testImageCiSha256) } w.WriteHeader(http.StatusOK) - data, _ := base64.StdEncoding.DecodeString(test_image_ci_data) + data, _ := base64.StdEncoding.DecodeString(testImageCiData) w.Write(data) } server = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { switch strings.TrimSpace(r.URL.Path) { - case test_image_ci_path: + case testImageCiPath: handler(w, r) default: http.NotFoundHandler().ServeHTTP(w, r) @@ -114,8 +114,8 @@ func Test_image_downloadunpack(t *testing.T) { t.Run("good", func(t *testing.T) { image := Image{ - Url: server.URL + test_image_ci_path, - Sum: "sha256:" + test_image_ci_sha256, + Url: server.URL + testImageCiPath, + Sum: "sha256:" + testImageCiSha256, } // Make sure image is ok @@ -132,7 +132,7 @@ func Test_image_downloadunpack(t *testing.T) { t.Run("bad_url", func(t 
*testing.T) { image := Image{ Url: server.URL + "/not/existing/artifact-version.tar.xz", - Sum: "sha256:" + test_image_ci_sha256, + Sum: "sha256:" + testImageCiSha256, } // Make sure image is ok @@ -149,7 +149,7 @@ func Test_image_downloadunpack(t *testing.T) { t.Run("bad_header_checksum", func(t *testing.T) { image := Image{ - Url: server.URL + test_image_ci_path, + Url: server.URL + testImageCiPath, Sum: "sha256:0123456789abcdef", } @@ -160,14 +160,14 @@ func Test_image_downloadunpack(t *testing.T) { // Download/unpack into temp directory err := image.DownloadUnpack(t.TempDir(), "user", "password") - if err == nil || err.Error() != `Image: The remote checksum (from header X-Checksum-Sha256) doesn't equal the desired one: "`+test_image_ci_sha256+`" != "0123456789abcdef" for "`+server.URL+test_image_ci_path+`"` { + if err == nil || err.Error() != `Image: The remote checksum (from header X-Checksum-Sha256) doesn't equal the desired one: "`+testImageCiSha256+`" != "0123456789abcdef" for "`+server.URL+testImageCiPath+`"` { t.Fatalf(`image.DownloadUnpack() = %q, error expected, but incorrect was returned: %v`, image.Url, err) } }) t.Run("bad_calculated_checksum", func(t *testing.T) { image := Image{ - Url: server.URL + test_image_ci_path + "?nosumheader", + Url: server.URL + testImageCiPath + "?nosumheader", Sum: "sha256:0123456789abcdef", } @@ -178,7 +178,7 @@ func Test_image_downloadunpack(t *testing.T) { // Download/unpack into temp directory err := image.DownloadUnpack(t.TempDir(), "user", "password") - if err == nil || err.Error() != `Image: The calculated checksum doesn't equal the desired one: "`+test_image_ci_sha256+`" != "0123456789abcdef" for "`+server.URL+test_image_ci_path+`?nosumheader"` { + if err == nil || err.Error() != `Image: The calculated checksum doesn't equal the desired one: "`+testImageCiSha256+`" != "0123456789abcdef" for "`+server.URL+testImageCiPath+`?nosumheader"` { t.Fatalf(`image.DownloadUnpack() = %q, error expected, but incorrect was 
returned: %v`, image.Url, err) } }) diff --git a/lib/drivers/native/config.go b/lib/drivers/native/config.go index 11da509..145a848 100644 --- a/lib/drivers/native/config.go +++ b/lib/drivers/native/config.go @@ -305,22 +305,22 @@ func (c *Config) Validate() (err error) { } // Create test init script - init_path, err := testScriptCreate(user) + initPath, err := testScriptCreate(user) if err != nil { userDelete(c, user) - return fmt.Errorf("Native: Unable to create test script in %q: %v", init_path, err) + return fmt.Errorf("Native: Unable to create test script in %q: %v", initPath, err) } // Run the test init script - if err = userRun(c, nil, user, init_path, map[string]any{}); err != nil { + if err = userRun(c, nil, user, initPath, map[string]any{}); err != nil { userDelete(c, user) - return fmt.Errorf("Native: Unable to run test init script %q: %v", init_path, err) + return fmt.Errorf("Native: Unable to run test init script %q: %v", initPath, err) } // Cleaning up the test script - if err := testScriptDelete(init_path); err != nil { + if err := testScriptDelete(initPath); err != nil { userDelete(c, user) - return fmt.Errorf("Native: Unable to delete test script in %q: %v", init_path, err) + return fmt.Errorf("Native: Unable to delete test script in %q: %v", initPath, err) } // Clean after the run @@ -336,23 +336,23 @@ func (c *Config) Validate() (err error) { // the user, but will require much less changes in the system. 
// Validating CpuAlter & RamAlter to not be less then the current cpu/ram count - cpu_stat, err := cpu.Counts(true) + cpuStat, err := cpu.Counts(true) if err != nil { return err } - if c.CpuAlter < 0 && cpu_stat <= -c.CpuAlter { - return log.Errorf("Native: |CpuAlter| can't be more or equal the available Host CPUs: |%d| > %d", c.CpuAlter, cpu_stat) + if c.CpuAlter < 0 && cpuStat <= -c.CpuAlter { + return log.Errorf("Native: |CpuAlter| can't be more or equal the available Host CPUs: |%d| > %d", c.CpuAlter, cpuStat) } - mem_stat, err := mem.VirtualMemory() + memStat, err := mem.VirtualMemory() if err != nil { return err } - ram_stat := mem_stat.Total / 1073741824 // Getting GB from Bytes + ramStat := memStat.Total / 1073741824 // Getting GB from Bytes - if c.RamAlter < 0 && int(ram_stat) <= -c.RamAlter { - return log.Errorf("Native: |RamAlter| can't be more or equal the available Host RAM: |%d| > %d", c.RamAlter, ram_stat) + if c.RamAlter < 0 && int(ramStat) <= -c.RamAlter { + return log.Errorf("Native: |RamAlter| can't be more or equal the available Host RAM: |%d| > %d", c.RamAlter, ramStat) } return nil diff --git a/lib/drivers/native/driver.go b/lib/drivers/native/driver.go index 135c939..7d0ca27 100644 --- a/lib/drivers/native/driver.go +++ b/lib/drivers/native/driver.go @@ -45,10 +45,10 @@ func init() { type Driver struct { cfg Config // Contains the available tasks of the driver - tasks_list []drivers.ResourceDriverTask + tasksList []drivers.ResourceDriverTask - total_cpu uint // In logical threads - total_ram uint // In RAM GB + totalCpu uint // In logical threads + totalRam uint // In RAM GB } // Is used to provide some data to the entry/metadata values which could contain templates @@ -73,17 +73,17 @@ func (d *Driver) Prepare(config []byte) error { } // Collect node resources status - cpu_stat, err := cpu.Counts(true) + cpuStat, err := cpu.Counts(true) if err != nil { return err } - d.total_cpu = uint(cpu_stat) + d.totalCpu = uint(cpuStat) - mem_stat, err := 
mem.VirtualMemory() + memStat, err := mem.VirtualMemory() if err != nil { return err } - d.total_ram = uint(mem_stat.Total / 1073741824) // Getting GB from Bytes + d.totalRam = uint(memStat.Total / 1073741824) // Getting GB from Bytes // TODO: Cleanup the image directory in case the images are not good @@ -101,8 +101,8 @@ func (d *Driver) ValidateDefinition(def types.LabelDefinition) error { // Empty name means user home which is always exists if img.Tag != "" { found := false - for d_name := range def.Resources.Disks { - if d_name == img.Tag { + for dName := range def.Resources.Disks { + if dName == img.Tag { found = true break } @@ -116,8 +116,8 @@ func (d *Driver) ValidateDefinition(def types.LabelDefinition) error { } // Allow Fish to ask the driver about it's capacity (free slots) of a specific definition -func (d *Driver) AvailableCapacity(node_usage types.Resources, req types.LabelDefinition) int64 { - var out_count int64 +func (d *Driver) AvailableCapacity(nodeUsage types.Resources, req types.LabelDefinition) int64 { + var outCount int64 var opts Options if err := opts.Apply(req.Options); err != nil { @@ -125,43 +125,43 @@ func (d *Driver) AvailableCapacity(node_usage types.Resources, req types.LabelDe } // Check if the node has the required resources - otherwise we can't run it anyhow - avail_cpu, avail_ram := d.getAvailResources() - if req.Resources.Cpu > avail_cpu { + availCpu, availRam := d.getAvailResources() + if req.Resources.Cpu > availCpu { return 0 } - if req.Resources.Ram > avail_ram { + if req.Resources.Ram > availRam { return 0 } // TODO: Check disk requirements // Since we have the required resources - let's check if tenancy allows us to expand them to // run more tenants here - if node_usage.IsEmpty() { + if nodeUsage.IsEmpty() { // In case we dealing with the first one - we need to set usage modificators, otherwise // those values will mess up the next calculations - node_usage.Multitenancy = req.Resources.Multitenancy - 
node_usage.CpuOverbook = req.Resources.CpuOverbook - node_usage.RamOverbook = req.Resources.RamOverbook + nodeUsage.Multitenancy = req.Resources.Multitenancy + nodeUsage.CpuOverbook = req.Resources.CpuOverbook + nodeUsage.RamOverbook = req.Resources.RamOverbook } - if node_usage.Multitenancy && req.Resources.Multitenancy { + if nodeUsage.Multitenancy && req.Resources.Multitenancy { // Ok we can run more tenants, let's calculate how much - if node_usage.CpuOverbook && req.Resources.CpuOverbook { - avail_cpu += d.cfg.CpuOverbook + if nodeUsage.CpuOverbook && req.Resources.CpuOverbook { + availCpu += d.cfg.CpuOverbook } - if node_usage.RamOverbook && req.Resources.RamOverbook { - avail_ram += d.cfg.RamOverbook + if nodeUsage.RamOverbook && req.Resources.RamOverbook { + availRam += d.cfg.RamOverbook } } // Calculate how much of those definitions we could run - out_count = int64((avail_cpu - node_usage.Cpu) / req.Resources.Cpu) - ram_count := int64((avail_ram - node_usage.Ram) / req.Resources.Ram) - if out_count > ram_count { - out_count = ram_count + outCount = int64((availCpu - nodeUsage.Cpu) / req.Resources.Cpu) + ramCount := int64((availRam - nodeUsage.Ram) / req.Resources.Ram) + if outCount > ramCount { + outCount = ramCount } // TODO: Add disks into equation - return out_count + return outCount } /** @@ -185,7 +185,7 @@ func (d *Driver) Allocate(def types.LabelDefinition, metadata map[string]any) (* log.Info("Native: Created user for Application execution:", user) // Create and connect volumes to container - disk_paths, err := d.disksCreate(user, def.Resources.Disks) + diskPaths, err := d.disksCreate(user, def.Resources.Disks) if err != nil { disksDelete(&d.cfg, user) userDelete(&d.cfg, user) @@ -193,17 +193,17 @@ func (d *Driver) Allocate(def types.LabelDefinition, metadata map[string]any) (* } // Set default path as homedir - disk_paths[""] = homedir + diskPaths[""] = homedir // Loading images and unpack them to home/disks according - if err := 
d.loadImages(user, opts.Images, disk_paths); err != nil { + if err := d.loadImages(user, opts.Images, diskPaths); err != nil { disksDelete(&d.cfg, user) userDelete(&d.cfg, user) return nil, log.Error("Native: Unable to load and unpack images:", err) } // Running workload - if err := userRun(&d.cfg, &EnvData{Disks: disk_paths}, user, opts.Entry, metadata); err != nil { + if err := userRun(&d.cfg, &EnvData{Disks: diskPaths}, user, opts.Entry, metadata); err != nil { disksDelete(&d.cfg, user) userDelete(&d.cfg, user) return nil, log.Error("Native: Unable to run the entry workload:", err) @@ -227,7 +227,7 @@ func (d *Driver) Status(res *types.Resource) (string, error) { func (d *Driver) GetTask(name, options string) drivers.ResourceDriverTask { // Look for the specified task name var t drivers.ResourceDriverTask - for _, task := range d.tasks_list { + for _, task := range d.tasksList { if task.Name() == name { t = task.Clone() } diff --git a/lib/drivers/native/options.go b/lib/drivers/native/options.go index 9a5c3da..71a0a74 100644 --- a/lib/drivers/native/options.go +++ b/lib/drivers/native/options.go @@ -15,7 +15,7 @@ package native import ( "encoding/json" "fmt" - os_user "os/user" + osuser "os/user" "runtime" "text/template" @@ -72,11 +72,11 @@ func (o *Options) Validate() error { // Set default user groups // The user is not complete without the primary group, so using current runtime user group if len(o.Groups) == 0 { - u, e := os_user.Current() + u, e := osuser.Current() if e != nil { return log.Error("Native: Unable to get the current system user:", e) } - group, e := os_user.LookupGroupId(u.Gid) + group, e := osuser.LookupGroupId(u.Gid) if e != nil { return log.Error("Native: Unable to get the current system user group name:", u.Gid, e) } @@ -84,14 +84,14 @@ func (o *Options) Validate() error { } // Check images - var img_err error + var imgErr error for index := range o.Images { if err := o.Images[index].Validate(); err != nil { - img_err = log.Error("Native: 
Error during image validation:", err) + imgErr = log.Error("Native: Error during image validation:", err) } } - if img_err != nil { - return img_err + if imgErr != nil { + return imgErr } return nil diff --git a/lib/drivers/native/util.go b/lib/drivers/native/util.go index 8a33f17..dd1be19 100644 --- a/lib/drivers/native/util.go +++ b/lib/drivers/native/util.go @@ -20,7 +20,7 @@ import ( "io/fs" "os" "os/exec" - os_user "os/user" + osuser "os/user" "path/filepath" "strconv" "strings" @@ -38,27 +38,27 @@ import ( ) // Common lock to properly acquire unique User ID -var user_create_lock sync.Mutex +var userCreateLock sync.Mutex // Returns the total resources available for the node after alteration -func (d *Driver) getAvailResources() (avail_cpu, avail_ram uint) { +func (d *Driver) getAvailResources() (availCpu, availRam uint) { if d.cfg.CpuAlter < 0 { - avail_cpu = d.total_cpu - uint(-d.cfg.CpuAlter) + availCpu = d.totalCpu - uint(-d.cfg.CpuAlter) } else { - avail_cpu = d.total_cpu + uint(d.cfg.CpuAlter) + availCpu = d.totalCpu + uint(d.cfg.CpuAlter) } if d.cfg.RamAlter < 0 { - avail_ram = d.total_ram - uint(-d.cfg.RamAlter) + availRam = d.totalRam - uint(-d.cfg.RamAlter) } else { - avail_ram = d.total_ram + uint(d.cfg.RamAlter) + availRam = d.totalRam + uint(d.cfg.RamAlter) } return } // Load images and unpack them according the tags -func (d *Driver) loadImages(user string, images []drivers.Image, disk_paths map[string]string) error { +func (d *Driver) loadImages(user string, images []drivers.Image, diskPaths map[string]string) error { var wg sync.WaitGroup for _, image := range images { log.Info("Native: Loading the required image:", image.Name, image.Version, image.Url) @@ -78,19 +78,19 @@ func (d *Driver) loadImages(user string, images []drivers.Image, disk_paths map[ // The images have to be processed sequentially - child images could override the parent files for _, image := range images { - image_unpacked := filepath.Join(d.cfg.ImagesPath, 
image.Name+"-"+image.Version) + imageUnpacked := filepath.Join(d.cfg.ImagesPath, image.Name+"-"+image.Version) // Getting the image subdir name in the unpacked dir subdir := "" - items, err := os.ReadDir(image_unpacked) + items, err := os.ReadDir(imageUnpacked) if err != nil { - return log.Error("Native: Unable to read the unpacked directory:", image_unpacked, err) + return log.Error("Native: Unable to read the unpacked directory:", imageUnpacked, err) } for _, f := range items { if strings.HasPrefix(f.Name(), image.Name) { if f.Type()&fs.ModeSymlink != 0 { // Potentially it can be a symlink (like used in local tests) - if _, err := os.Stat(filepath.Join(image_unpacked, f.Name())); err != nil { + if _, err := os.Stat(filepath.Join(imageUnpacked, f.Name())); err != nil { log.Warn("Native: The image symlink is broken:", f.Name(), err) continue } @@ -100,29 +100,29 @@ func (d *Driver) loadImages(user string, images []drivers.Image, disk_paths map[ } } if subdir == "" { - log.Errorf("Native: Unpacked image '%s' has no subfolder '%s', only: %q", image_unpacked, image.Name, items) + log.Errorf("Native: Unpacked image '%s' has no subfolder '%s', only: %q", imageUnpacked, image.Name, items) return fmt.Errorf("Native: The image was unpacked incorrectly, please check log for the errors") } // Unpacking the image according its specified tag. 
If tag is empty - unpacks to home dir, // otherwise if tag exists in the disks map - then use its path to unpack there - image_archive := filepath.Join(image_unpacked, subdir, image.Name+".tar") - unpack_path, ok := disk_paths[image.Tag] + imageArchive := filepath.Join(imageUnpacked, subdir, image.Name+".tar") + unpackPath, ok := diskPaths[image.Tag] if !ok { - return log.Error("Native: Unable to find where to unpack the image:", image.Tag, image_archive, err) + return log.Error("Native: Unable to find where to unpack the image:", image.Tag, imageArchive, err) } // Since the image is under Fish node control and user could have no read access to the file // it's a good idea to use stdin of the tar command to unpack properly. - f, err := os.Open(image_archive) + f, err := os.Open(imageArchive) if err != nil { - return log.Error("Native: Unable to read the image:", image_archive, err) + return log.Error("Native: Unable to read the image:", imageArchive, err) } defer f.Close() - log.Info("Native: Unpacking image:", user, image_archive, unpack_path) - _, _, err = runAndLog(5*time.Minute, f, d.cfg.SudoPath, "-n", d.cfg.TarPath, "-xf", "-", "--uname", user, "-C", unpack_path+"/") + log.Info("Native: Unpacking image:", user, imageArchive, unpackPath) + _, _, err = runAndLog(5*time.Minute, f, d.cfg.SudoPath, "-n", d.cfg.TarPath, "-xf", "-", "--uname", user, "-C", unpackPath+"/") if err != nil { - return log.Error("Native: Unable to unpack the image:", image_archive, err) + return log.Error("Native: Unable to unpack the image:", imageArchive, err) } } @@ -158,49 +158,49 @@ func userCreate(c *Config, groups []string) (user, homedir string, err error) { } // Choose the UniqueID for the new user - user_create_lock.Lock() + userCreateLock.Lock() { // Locate the unassigned user id var stdout string if stdout, _, err = runAndLog(5*time.Second, nil, c.DsclPath, ".", "list", "/Users", "UniqueID"); err != nil { - user_create_lock.Unlock() + userCreateLock.Unlock() err = 
log.Error("Native: Unable to list directory users:", err) return } // Finding the max user id in the OS - user_id := int64(1000) // Min 1000 is ok for most of the unix systems - split_stdout := strings.Split(strings.TrimSpace(stdout), "\n") - for _, line := range split_stdout { - line_id := line[strings.LastIndex(line, " ")+1:] - line_id_num, err := strconv.ParseInt(line_id, 10, 64) + userId := int64(1000) // Min 1000 is ok for most of the unix systems + splitStdout := strings.Split(strings.TrimSpace(stdout), "\n") + for _, line := range splitStdout { + lineId := line[strings.LastIndex(line, " ")+1:] + lineIdNum, err := strconv.ParseInt(lineId, 10, 64) if err != nil { log.Warnf("Native: Unable to parse user id from line: %q", line) continue } - if line_id_num > user_id { - user_id = line_id_num + if lineIdNum > userId { + userId = lineIdNum } } // Increment max user id and use it as unique id for new user - if _, _, err = runAndLog(5*time.Second, nil, c.SudoPath, "-n", c.DsclPath, ".", "create", "/Users/"+user, "UniqueID", fmt.Sprint(user_id+1)); err != nil { - user_create_lock.Unlock() + if _, _, err = runAndLog(5*time.Second, nil, c.SudoPath, "-n", c.DsclPath, ".", "create", "/Users/"+user, "UniqueID", fmt.Sprint(userId+1)); err != nil { + userCreateLock.Unlock() err = log.Error("Native: Unable to set user UniqueID:", err) return } } - user_create_lock.Unlock() + userCreateLock.Unlock() // Locate the primary user group id - primary_group, e := os_user.LookupGroup(groups[0]) + primaryGroup, e := osuser.LookupGroup(groups[0]) if e != nil { err = log.Error("Native: Unable to locate group GID for:", groups[0], e) return } // Set user primary group - if _, _, err = runAndLog(5*time.Second, nil, c.SudoPath, "-n", c.DsclPath, ".", "create", "/Users/"+user, "PrimaryGroupID", primary_group.Gid); err != nil { + if _, _, err = runAndLog(5*time.Second, nil, c.SudoPath, "-n", c.DsclPath, ".", "create", "/Users/"+user, "PrimaryGroupID", primaryGroup.Gid); err != nil { err = 
log.Error("Native: Unable to set user PrimaryGroupID:", err) return } @@ -231,8 +231,8 @@ func userCreate(c *Config, groups []string) (user, homedir string, err error) { return } -func processTemplate(tpl_data *EnvData, value string) (string, error) { - if tpl_data == nil { +func processTemplate(tplData *EnvData, value string) (string, error) { + if tplData == nil { return value, nil } tmpl, err := template.New("").Parse(value) @@ -241,7 +241,7 @@ func processTemplate(tpl_data *EnvData, value string) (string, error) { return "", fmt.Errorf("Native: Unable to parse template: %v, %v", value, err) } var buf bytes.Buffer - err = tmpl.Execute(&buf, *tpl_data) + err = tmpl.Execute(&buf, *tplData) if err != nil { return "", fmt.Errorf("Native: Unable to execute template: %v, %v", value, err) } @@ -250,54 +250,54 @@ func processTemplate(tpl_data *EnvData, value string) (string, error) { } // Runs the executable as defined user -func userRun(c *Config, env_data *EnvData, user, entry string, metadata map[string]any) (err error) { +func userRun(c *Config, envData *EnvData, user, entry string, metadata map[string]any) (err error) { // Entry value could contain template data - var tmp_data string - if tmp_data, err = processTemplate(env_data, entry); err != nil { + var tmpData string + if tmpData, err = processTemplate(envData, entry); err != nil { return log.Error("Native: Unable to process `entry` template:", entry, err) } - entry = tmp_data + entry = tmpData // Metadata values could contain template data - env_vars := make(map[string]any) + envVars := make(map[string]any) for key, val := range metadata { - if tmp_data, err = processTemplate(env_data, fmt.Sprintf("%v", val)); err != nil { + if tmpData, err = processTemplate(envData, fmt.Sprintf("%v", val)); err != nil { return log.Errorf("Native: Unable to process metadata `%s` template: %v", key, err) } // Add to the map of the variables to store - env_vars[key] = tmp_data + envVars[key] = tmpData } // Unfortunately passing 
the environment through the cmd.Env and sudo/su is not that easy, so // using a temp file instead, which is removed right after the entry is started. - env_file_data, err := util.SerializeMetadata("export", "", env_vars) + envFileData, err := util.SerializeMetadata("export", "", envVars) if err != nil { return log.Errorf("Native: Unable to serialize metadata into 'export' format: %v", err) } // Using common /tmp dir available for each user in the system - env_file, err := os.CreateTemp("/tmp", "*.metadata.sh") + envFile, err := os.CreateTemp("/tmp", "*.metadata.sh") if err != nil { return log.Error("Native: Unable to create temp env file:", err) } - defer os.Remove(env_file.Name()) - if _, err := env_file.Write(env_file_data); err != nil { + defer os.Remove(envFile.Name()) + if _, err := envFile.Write(envFileData); err != nil { return log.Error("Native: Unable to write temp env file:", err) } - if err := env_file.Close(); err != nil { + if err := envFile.Close(); err != nil { return log.Error("Native: Unable to close temp env file:", err) } // Add ACL permission to the env file to allow to read it by unprevileged user - if _, _, err := runAndLogRetry(5, 5*time.Second, nil, c.ChmodPath, "+a", fmt.Sprintf("user:%s:allow read,readattr,readextattr,readsecurity", user), env_file.Name()); err != nil { + if _, _, err := runAndLogRetry(5, 5*time.Second, nil, c.ChmodPath, "+a", fmt.Sprintf("user:%s:allow read,readattr,readextattr,readsecurity", user), envFile.Name()); err != nil { return log.Error("Native: Unable to set ACL for temp env file:", err) } // Prepare the command to execute entry from user home directory - shell_line := fmt.Sprintf("source %s; %s", env_file.Name(), shellescape.Quote(shellescape.StripUnsafe(entry))) - cmd := exec.Command(c.SudoPath, "-n", c.SuPath, "-l", user, "-c", shell_line) // #nosec G204 - if env_data != nil && env_data.Disks != nil { - if _, ok := env_data.Disks[""]; ok { - cmd.Dir = env_data.Disks[""] + shellLine := fmt.Sprintf("source %s; 
%s", envFile.Name(), shellescape.Quote(shellescape.StripUnsafe(entry))) + cmd := exec.Command(c.SudoPath, "-n", c.SuPath, "-l", user, "-c", shellLine) // #nosec G204 + if envData != nil && envData.Disks != nil { + if _, ok := envData.Disks[""]; ok { + cmd.Dir = envData.Disks[""] } } @@ -316,7 +316,7 @@ func userRun(c *Config, env_data *EnvData, user, entry string, metadata map[stri // TODO: Probably I should run cmd.Wait to make sure the captured OS resources are released, // but not sure about that... Maybe create a goroutine that will sit and wait there? - log.Debugf("Native: Started entry for user %q in directory %q with PID %d: %s", user, cmd.Dir, cmd.Process.Pid, shell_line) + log.Debugf("Native: Started entry for user %q in directory %q with PID %d: %s", user, cmd.Dir, cmd.Process.Pid, shellLine) // Giving the process 1 second to read the env file and not die from some unexpected error time.Sleep(time.Second) @@ -332,7 +332,7 @@ func userRun(c *Config, env_data *EnvData, user, entry string, metadata map[stri } // Stop the user processes -func userStop(c *Config, user string) (out_err error) { //nolint:unparam +func userStop(c *Config, user string) (outErr error) { //nolint:unparam // In theory we can use `sysadminctl -deleteUser` command instead, which is also stopping all the // user processes and cleans up the home dir, but it asks for elevated previleges so not sure how // useful it will be in automation... @@ -356,58 +356,58 @@ func userStop(c *Config, user string) (out_err error) { //nolint:unparam } // Delete user and clean up -func userDelete(c *Config, user string) (out_err error) { +func userDelete(c *Config, user string) (outErr error) { // Stopping the processes because they could cause user lock - out_err = userStop(c, user) + outErr = userStop(c, user) // Sometimes delete of the user could not be done due to MacOS blocking it, so retrying 5 times // Native: Command exited with error: exit status 40:
delete status: eDSPermissionError DS Error: -14120 (eDSPermissionError) if _, _, err := runAndLogRetry(5, 5*time.Second, nil, c.SudoPath, "-n", c.DsclPath, ".", "delete", "/Users/"+user); err != nil { - out_err = log.Error("Native: Unable to delete user:", err) + outErr = log.Error("Native: Unable to delete user:", err) } if _, _, err := runAndLog(5*time.Second, nil, c.SudoPath, "-n", c.RmPath, "-rf", "/Users/"+user); err != nil { - out_err = log.Error("Native: Unable to remove the user home directory:", err) + outErr = log.Error("Native: Unable to remove the user home directory:", err) } return } // Unmount user volumes and delete the disk files -func disksDelete(c *Config, user string) (out_err error) { +func disksDelete(c *Config, user string) (outErr error) { // Stopping the processes because they could cause user lock - out_err = userStop(c, user) + outErr = userStop(c, user) // Getting the list of the mounted volumes volumes, err := os.ReadDir("/Volumes") if err != nil { - out_err = log.Error("Native: Unable to list mounted volumes:", err) + outErr = log.Error("Native: Unable to list mounted volumes:", err) } - env_volumes := []string{} + envVolumes := []string{} for _, file := range volumes { if file.IsDir() && strings.HasPrefix(file.Name(), user) { - env_volumes = append(env_volumes, filepath.Join("/Volumes", file.Name())) + envVolumes = append(envVolumes, filepath.Join("/Volumes", file.Name())) } } // Umount the disk volumes if needed mounts, _, err := runAndLog(3*time.Second, nil, c.MountPath) if err != nil { - out_err = log.Error("Native: Unable to list the mount points:", user, err) + outErr = log.Error("Native: Unable to list the mount points:", user, err) } - for _, vol_path := range env_volumes { - if strings.Contains(mounts, vol_path) { - if _, _, err := runAndLog(5*time.Second, nil, c.HdiutilPath, "detach", vol_path); err != nil { - out_err = log.Error("Native: Unable to detach the volume disk:", user, vol_path, err) + for _, volPath := range 
envVolumes { + if strings.Contains(mounts, volPath) { + if _, _, err := runAndLog(5*time.Second, nil, c.HdiutilPath, "detach", volPath); err != nil { + outErr = log.Error("Native: Unable to detach the volume disk:", user, volPath, err) } } } // Cleaning the env work directory with disks - workspace_path := filepath.Join(c.WorkspacePath, user) - if _, err := os.Stat(workspace_path); !os.IsNotExist(err) { - if err := os.RemoveAll(workspace_path); err != nil { - out_err = log.Error("Native: Unable to remove user env workspace:", user, err) + workspacePath := filepath.Join(c.WorkspacePath, user) + if _, err := os.Stat(workspacePath); !os.IsNotExist(err) { + if err := os.RemoveAll(workspacePath); err != nil { + outErr = log.Error("Native: Unable to remove user env workspace:", user, err) } } @@ -417,30 +417,30 @@ func disksDelete(c *Config, user string) (out_err error) { // Creates disks directories described by the disks map, returns the map of disks to mount paths func (d *Driver) disksCreate(user string, disks map[string]types.ResourcesDisk) (map[string]string, error) { // Create disks - disk_paths := make(map[string]string, len(disks)) + diskPaths := make(map[string]string, len(disks)) - for d_name, disk := range disks { - disk_path := filepath.Join(d.cfg.WorkspacePath, user, "disk-"+d_name) - if err := os.MkdirAll(filepath.Dir(disk_path), 0o755); err != nil { - return disk_paths, err + for dName, disk := range disks { + diskPath := filepath.Join(d.cfg.WorkspacePath, user, "disk-"+dName) + if err := os.MkdirAll(filepath.Dir(diskPath), 0o755); err != nil { + return diskPaths, err } // Create disk // TODO: Ensure failures doesn't leave the changes behind (like mounted disks or files) if disk.Type == "dir" { - if err := os.MkdirAll(disk_path, 0o777); err != nil { - return disk_paths, err + if err := os.MkdirAll(diskPath, 0o777); err != nil { + return diskPaths, err } - disk_paths[d_name] = disk_path + diskPaths[dName] = diskPath // TODO: Validate the available disk 
space for disk.Size continue } // Create virtual disk in order to restrict the disk space - dmg_path := disk_path + ".dmg" + dmgPath := diskPath + ".dmg" - label := d_name + label := dName if disk.Label != "" { // Label can be used as mount point so cut the path separator out label = strings.ReplaceAll(disk.Label, "/", "") @@ -449,39 +449,39 @@ func (d *Driver) disksCreate(user string, disks map[string]types.ResourcesDisk) } // Do not recreate the disk if it is exists - if _, err := os.Stat(dmg_path); os.IsNotExist(err) { - args := []string{"create", dmg_path, + if _, err := os.Stat(dmgPath); os.IsNotExist(err) { + args := []string{"create", dmgPath, "-fs", "HFS+", "-layout", "NONE", "-volname", label, "-size", fmt.Sprintf("%dm", disk.Size*1024), } if _, _, err := runAndLog(10*time.Minute, nil, d.cfg.HdiutilPath, args...); err != nil { - return disk_paths, log.Error("Native: Unable to create dmg disk:", dmg_path, err) + return diskPaths, log.Error("Native: Unable to create dmg disk:", dmgPath, err) } } - mount_point := filepath.Join("/Volumes", fmt.Sprintf("%s_%s", user, d_name)) + mountPoint := filepath.Join("/Volumes", fmt.Sprintf("%s_%s", user, dName)) // Attach & mount disk - if _, _, err := runAndLog(10*time.Second, nil, d.cfg.HdiutilPath, "attach", dmg_path, "-owners", "on", "-mountpoint", mount_point); err != nil { - return disk_paths, log.Error("Native: Unable to attach dmg disk:", dmg_path, mount_point, err) + if _, _, err := runAndLog(10*time.Second, nil, d.cfg.HdiutilPath, "attach", dmgPath, "-owners", "on", "-mountpoint", mountPoint); err != nil { + return diskPaths, log.Error("Native: Unable to attach dmg disk:", dmgPath, mountPoint, err) } // Change the owner of the volume to user - if _, _, err := runAndLog(5*time.Second, nil, d.cfg.SudoPath, "-n", d.cfg.ChownPath, "-R", user+":staff", mount_point+"/"); err != nil { - return disk_paths, fmt.Errorf("Native: Error user disk mount path chown: %v", err) + if _, _, err := runAndLog(5*time.Second, nil, 
d.cfg.SudoPath, "-n", d.cfg.ChownPath, "-R", user+":staff", mountPoint+"/"); err != nil { + return diskPaths, fmt.Errorf("Native: Error user disk mount path chown: %v", err) } // (Optional) Disable spotlight for the mounted volume - if _, _, err := runAndLog(5*time.Second, nil, d.cfg.SudoPath, d.cfg.MdutilPath, "-i", "off", mount_point+"/"); err != nil { - log.Warn("Native: Unable to disable spotlight for the volume:", mount_point, err) + if _, _, err := runAndLog(5*time.Second, nil, d.cfg.SudoPath, d.cfg.MdutilPath, "-i", "off", mountPoint+"/"); err != nil { + log.Warn("Native: Unable to disable spotlight for the volume:", mountPoint, err) } - disk_paths[d_name] = mount_point + diskPaths[dName] = mountPoint } - return disk_paths, nil + return diskPaths, nil } // Runs & logs the executable command diff --git a/lib/drivers/test/driver.go b/lib/drivers/test/driver.go index 16df839..38ab2c3 100644 --- a/lib/drivers/test/driver.go +++ b/lib/drivers/test/driver.go @@ -46,7 +46,7 @@ func init() { type Driver struct { cfg Config // Contains the available tasks of the driver - tasks_list []drivers.ResourceDriverTask + tasksList []drivers.ResourceDriverTask } func (d *Driver) Name() string { @@ -66,7 +66,7 @@ func (d *Driver) Prepare(config []byte) error { } // Fill up the available tasks - d.tasks_list = append(d.tasks_list, &TaskSnapshot{driver: d}) + d.tasksList = append(d.tasksList, &TaskSnapshot{driver: d}) return nil } @@ -77,8 +77,8 @@ func (d *Driver) ValidateDefinition(def types.LabelDefinition) error { } // Allow Fish to ask the driver about it's capacity (free slots) of a specific definition -func (d *Driver) AvailableCapacity(node_usage types.Resources, req types.LabelDefinition) int64 { - var out_count int64 +func (d *Driver) AvailableCapacity(nodeUsage types.Resources, req types.LabelDefinition) int64 { + var outCount int64 var opts Options if err := opts.Apply(req.Options); err != nil { @@ -91,51 +91,51 @@ func (d *Driver) AvailableCapacity(node_usage 
types.Resources, req types.LabelDe return -1 } - total_cpu := d.cfg.CpuLimit - total_ram := d.cfg.RamLimit + totalCpu := d.cfg.CpuLimit + totalRam := d.cfg.RamLimit - if total_cpu == 0 && total_ram == 0 { + if totalCpu == 0 && totalRam == 0 { // Resources are unlimited return 99999 } // Check if the node has the required resources - otherwise we can't run it anyhow - if req.Resources.Cpu > total_cpu { + if req.Resources.Cpu > totalCpu { return 0 } - if req.Resources.Ram > total_ram { + if req.Resources.Ram > totalRam { return 0 } // TODO: Check disk requirements // Since we have the required resources - let's check if tenancy allows us to expand them to // run more tenants here - if node_usage.IsEmpty() { + if nodeUsage.IsEmpty() { // In case we dealing with the first one - we need to set usage modificators, otherwise // those values will mess up the next calculations - node_usage.Multitenancy = req.Resources.Multitenancy - node_usage.CpuOverbook = req.Resources.CpuOverbook - node_usage.RamOverbook = req.Resources.RamOverbook + nodeUsage.Multitenancy = req.Resources.Multitenancy + nodeUsage.CpuOverbook = req.Resources.CpuOverbook + nodeUsage.RamOverbook = req.Resources.RamOverbook } - if node_usage.Multitenancy && req.Resources.Multitenancy { + if nodeUsage.Multitenancy && req.Resources.Multitenancy { // Ok we can run more tenants, let's calculate how much - if node_usage.CpuOverbook && req.Resources.CpuOverbook { - total_cpu += d.cfg.CpuOverbook + if nodeUsage.CpuOverbook && req.Resources.CpuOverbook { + totalCpu += d.cfg.CpuOverbook } - if node_usage.RamOverbook && req.Resources.RamOverbook { - total_ram += d.cfg.RamOverbook + if nodeUsage.RamOverbook && req.Resources.RamOverbook { + totalRam += d.cfg.RamOverbook } } // Calculate how much of those definitions we could run - out_count = int64((total_cpu - node_usage.Cpu) / req.Resources.Cpu) - ram_count := int64((total_ram - node_usage.Ram) / req.Resources.Ram) - if out_count > ram_count { - out_count = ram_count 
+ outCount = int64((totalCpu - nodeUsage.Cpu) / req.Resources.Cpu) + ramCount := int64((totalRam - nodeUsage.Ram) / req.Resources.Ram) + if outCount > ramCount { + outCount = ramCount } // TODO: Add disks into equation - return out_count + return outCount } /** @@ -153,19 +153,19 @@ func (d *Driver) Allocate(def types.LabelDefinition, metadata map[string]any) (* // Generate random resource id and if exists - regenerate res := &types.Resource{} - var res_file string + var resFile string for { res.Identifier = "test-" + crypt.RandString(6) - res_file = filepath.Join(d.cfg.WorkspacePath, res.Identifier) - if _, err := os.Stat(res_file); os.IsNotExist(err) { + resFile = filepath.Join(d.cfg.WorkspacePath, res.Identifier) + if _, err := os.Stat(resFile); os.IsNotExist(err) { break } } // Write identifier file - fh, err := os.Create(res_file) + fh, err := os.Create(resFile) if err != nil { - return nil, fmt.Errorf("TEST: Unable to open file %q to store identifier: %v", res_file, err) + return nil, fmt.Errorf("TEST: Unable to open file %q to store identifier: %v", resFile, err) } defer fh.Close() @@ -180,8 +180,8 @@ func (d *Driver) Status(res *types.Resource) (string, error) { return "", fmt.Errorf("TEST: RandomFail: %v\n", err) } - res_file := filepath.Join(d.cfg.WorkspacePath, res.Identifier) - if _, err := os.Stat(res_file); !os.IsNotExist(err) { + resFile := filepath.Join(d.cfg.WorkspacePath, res.Identifier) + if _, err := os.Stat(resFile); !os.IsNotExist(err) { return drivers.StatusAllocated, nil } return drivers.StatusNone, nil @@ -190,7 +190,7 @@ func (d *Driver) Status(res *types.Resource) (string, error) { func (d *Driver) GetTask(name, options string) drivers.ResourceDriverTask { // Look for the specified task name var t drivers.ResourceDriverTask - for _, task := range d.tasks_list { + for _, task := range d.tasksList { if task.Name() == name { t = task.Clone() } @@ -215,11 +215,11 @@ func (d *Driver) Deallocate(res *types.Resource) error { return 
log.Error("TEST: RandomFail:", err) } - res_file := filepath.Join(d.cfg.WorkspacePath, res.Identifier) - if _, err := os.Stat(res_file); os.IsNotExist(err) { + resFile := filepath.Join(d.cfg.WorkspacePath, res.Identifier) + if _, err := os.Stat(resFile); os.IsNotExist(err) { return fmt.Errorf("TEST: Unable to deallocate unavailable resource '%s'", res.Identifier) } - if err := os.Remove(res_file); err != nil { + if err := os.Remove(resFile); err != nil { return fmt.Errorf("TEST: Unable to deallocate the resource '%s': %v", res.Identifier, err) } diff --git a/lib/drivers/test/tasks.go b/lib/drivers/test/tasks.go index 1c63455..290ef3b 100644 --- a/lib/drivers/test/tasks.go +++ b/lib/drivers/test/tasks.go @@ -62,8 +62,8 @@ func (t *TaskSnapshot) Execute() (result []byte, err error) { return []byte(`{}`), log.Error("TEST: RandomFail:", err) } - res_file := filepath.Join(t.driver.cfg.WorkspacePath, t.Resource.Identifier) - if _, err := os.Stat(res_file); os.IsNotExist(err) { + resFile := filepath.Join(t.driver.cfg.WorkspacePath, t.Resource.Identifier) + if _, err := os.Stat(resFile); os.IsNotExist(err) { return []byte(`{}`), fmt.Errorf("TEST: Unable to snapshot unavailable resource '%s'", t.Resource.Identifier) } diff --git a/lib/drivers/vmx/config.go b/lib/drivers/vmx/config.go index 210255b..d6be978 100644 --- a/lib/drivers/vmx/config.go +++ b/lib/drivers/vmx/config.go @@ -107,23 +107,23 @@ func (c *Config) Validate() (err error) { } // Validating CpuAlter & RamAlter to not be less then the current cpu/ram count - cpu_stat, err := cpu.Counts(true) + cpuStat, err := cpu.Counts(true) if err != nil { return err } - if c.CpuAlter < 0 && cpu_stat <= -c.CpuAlter { - return log.Errorf("VMX: |CpuAlter| can't be more or equal the available Host CPUs: |%d| > %d", c.CpuAlter, cpu_stat) + if c.CpuAlter < 0 && cpuStat <= -c.CpuAlter { + return log.Errorf("VMX: |CpuAlter| can't be more or equal the available Host CPUs: |%d| > %d", c.CpuAlter, cpuStat) } - mem_stat, err := 
mem.VirtualMemory() + memStat, err := mem.VirtualMemory() if err != nil { return err } - ram_stat := mem_stat.Total / 1073741824 // Getting GB from Bytes + ramStat := memStat.Total / 1073741824 // Getting GB from Bytes - if c.RamAlter < 0 && int(ram_stat) <= -c.RamAlter { - return log.Errorf("VMX: |RamAlter| can't be more or equal the available Host RAM: |%d| > %d", c.RamAlter, ram_stat) + if c.RamAlter < 0 && int(ramStat) <= -c.RamAlter { + return log.Errorf("VMX: |RamAlter| can't be more or equal the available Host RAM: |%d| > %d", c.RamAlter, ramStat) } return nil diff --git a/lib/drivers/vmx/driver.go b/lib/drivers/vmx/driver.go index 44e5f91..1ca74ec 100644 --- a/lib/drivers/vmx/driver.go +++ b/lib/drivers/vmx/driver.go @@ -49,10 +49,10 @@ func init() { type Driver struct { cfg Config // Contains the available tasks of the driver - tasks_list []drivers.ResourceDriverTask + tasksList []drivers.ResourceDriverTask - total_cpu uint // In logical threads - total_ram uint // In RAM GB + totalCpu uint // In logical threads + totalRam uint // In RAM GB } func (d *Driver) Name() string { @@ -72,17 +72,17 @@ func (d *Driver) Prepare(config []byte) error { } // Collect node resources status - cpu_stat, err := cpu.Counts(true) + cpuStat, err := cpu.Counts(true) if err != nil { return err } - d.total_cpu = uint(cpu_stat) + d.totalCpu = uint(cpuStat) - mem_stat, err := mem.VirtualMemory() + memStat, err := mem.VirtualMemory() if err != nil { return err } - d.total_ram = uint(mem_stat.Total / 1073741824) // Getting GB from Bytes + d.totalRam = uint(memStat.Total / 1073741824) // Getting GB from Bytes // TODO: Cleanup the image directory in case the images are not good @@ -101,48 +101,48 @@ func (d *Driver) ValidateDefinition(def types.LabelDefinition) error { } // Allow Fish to ask the driver about it's capacity (free slots) of a specific definition -func (d *Driver) AvailableCapacity(node_usage types.Resources, req types.LabelDefinition) int64 { - var out_count int64 +func 
(d *Driver) AvailableCapacity(nodeUsage types.Resources, req types.LabelDefinition) int64 { + var outCount int64 - avail_cpu, avail_ram := d.getAvailResources() + availCpu, availRam := d.getAvailResources() // Check if the node has the required resources - otherwise we can't run it anyhow - if req.Resources.Cpu > avail_cpu { + if req.Resources.Cpu > availCpu { return 0 } - if req.Resources.Ram > avail_ram { + if req.Resources.Ram > availRam { return 0 } // TODO: Check disk requirements // Since we have the required resources - let's check if tenancy allows us to expand them to // run more tenants here - if node_usage.IsEmpty() { + if nodeUsage.IsEmpty() { // In case we dealing with the first one - we need to set usage modificators, otherwise // those values will mess up the next calculations - node_usage.Multitenancy = req.Resources.Multitenancy - node_usage.CpuOverbook = req.Resources.CpuOverbook - node_usage.RamOverbook = req.Resources.RamOverbook + nodeUsage.Multitenancy = req.Resources.Multitenancy + nodeUsage.CpuOverbook = req.Resources.CpuOverbook + nodeUsage.RamOverbook = req.Resources.RamOverbook } - if node_usage.Multitenancy && req.Resources.Multitenancy { + if nodeUsage.Multitenancy && req.Resources.Multitenancy { // Ok we can run more tenants, let's calculate how much - if node_usage.CpuOverbook && req.Resources.CpuOverbook { - avail_cpu += d.cfg.CpuOverbook + if nodeUsage.CpuOverbook && req.Resources.CpuOverbook { + availCpu += d.cfg.CpuOverbook } - if node_usage.RamOverbook && req.Resources.RamOverbook { - avail_ram += d.cfg.RamOverbook + if nodeUsage.RamOverbook && req.Resources.RamOverbook { + availRam += d.cfg.RamOverbook } } // Calculate how much of those definitions we could run - out_count = int64((avail_cpu - node_usage.Cpu) / req.Resources.Cpu) - ram_count := int64((avail_ram - node_usage.Ram) / req.Resources.Ram) - if out_count > ram_count { - out_count = ram_count + outCount = int64((availCpu - nodeUsage.Cpu) / req.Resources.Cpu) + ramCount 
:= int64((availRam - nodeUsage.Ram) / req.Resources.Ram) + if outCount > ramCount { + outCount = ramCount } // TODO: Add disks into equation - return out_count + return outCount } /** @@ -160,72 +160,72 @@ func (d *Driver) Allocate(def types.LabelDefinition, metadata map[string]any) (* // Generate unique id from the hw address and required directories buf := crypt.RandBytes(6) buf[0] = (buf[0] | 2) & 0xfe // Set local bit, ensure unicast address - vm_id := fmt.Sprintf("%02x%02x%02x%02x%02x%02x", buf[0], buf[1], buf[2], buf[3], buf[4], buf[5]) - vm_hwaddr := fmt.Sprintf("%02x:%02x:%02x:%02x:%02x:%02x", buf[0], buf[1], buf[2], buf[3], buf[4], buf[5]) + vmId := fmt.Sprintf("%02x%02x%02x%02x%02x%02x", buf[0], buf[1], buf[2], buf[3], buf[4], buf[5]) + vmHwaddr := fmt.Sprintf("%02x:%02x:%02x:%02x:%02x:%02x", buf[0], buf[1], buf[2], buf[3], buf[4], buf[5]) - vm_network := def.Resources.Network - if vm_network == "" { - vm_network = "hostonly" + vmNetwork := def.Resources.Network + if vmNetwork == "" { + vmNetwork = "hostonly" } - vm_dir := filepath.Join(d.cfg.WorkspacePath, vm_id) - vm_images_dir := filepath.Join(vm_dir, "images") + vmDir := filepath.Join(d.cfg.WorkspacePath, vmId) + vmImagesDir := filepath.Join(vmDir, "images") // Load the required images - img_path, err := d.loadImages(&opts, vm_images_dir) + imgPath, err := d.loadImages(&opts, vmImagesDir) if err != nil { - d.cleanupVm(vm_dir) + d.cleanupVm(vmDir) return nil, log.Error("VMX: Unable to load the required images:", err) } // Clone VM from the image - vmx_path := filepath.Join(vm_dir, vm_id+".vmx") + vmxPath := filepath.Join(vmDir, vmId+".vmx") args := []string{"-T", "fusion", "clone", - img_path, vmx_path, + imgPath, vmxPath, "linked", "-snapshot", "original", - "-cloneName", vm_id, + "-cloneName", vmId, } if _, _, err := runAndLog(120*time.Second, d.cfg.VmrunPath, args...); err != nil { - d.cleanupVm(vm_dir) - return nil, log.Error("VMX: Unable to clone the target image:", img_path, err) + 
d.cleanupVm(vmDir) + return nil, log.Error("VMX: Unable to clone the target image:", imgPath, err) } // Change cloned vm configuration - if err := util.FileReplaceToken(vmx_path, + if err := util.FileReplaceToken(vmxPath, true, true, true, "ethernet0.addressType =", `ethernet0.addressType = "static"`, - "ethernet0.address =", fmt.Sprintf("ethernet0.address = %q", vm_hwaddr), - "ethernet0.connectiontype =", fmt.Sprintf("ethernet0.connectiontype = %q", vm_network), + "ethernet0.address =", fmt.Sprintf("ethernet0.address = %q", vmHwaddr), + "ethernet0.connectiontype =", fmt.Sprintf("ethernet0.connectiontype = %q", vmNetwork), "numvcpus =", fmt.Sprintf(`numvcpus = "%d"`, def.Resources.Cpu), "cpuid.corespersocket =", fmt.Sprintf(`cpuid.corespersocket = "%d"`, def.Resources.Cpu), "memsize =", fmt.Sprintf(`memsize = "%d"`, def.Resources.Ram*1024), ); err != nil { - d.cleanupVm(vm_dir) - return nil, log.Error("VMX: Unable to change cloned VM configuration:", vmx_path, err) + d.cleanupVm(vmDir) + return nil, log.Error("VMX: Unable to change cloned VM configuration:", vmxPath, err) } // Create and connect disks to vmx - if err := d.disksCreate(vmx_path, def.Resources.Disks); err != nil { - d.cleanupVm(vm_dir) - return nil, log.Error("VMX: Unable create disks for VM:", vmx_path, err) + if err := d.disksCreate(vmxPath, def.Resources.Disks); err != nil { + d.cleanupVm(vmDir) + return nil, log.Error("VMX: Unable create disks for VM:", vmxPath, err) } // Run the background monitoring of the vmware log if d.cfg.LogMonitor { - go d.logMonitor(vm_id, vmx_path) + go d.logMonitor(vmId, vmxPath) } // Run the VM - if _, _, err := runAndLog(120*time.Second, d.cfg.VmrunPath, "start", vmx_path, "nogui"); err != nil { + if _, _, err := runAndLog(120*time.Second, d.cfg.VmrunPath, "start", vmxPath, "nogui"); err != nil { log.Error("VMX: Check logs in ~/Library/Logs/VMware/ or enable debug to see vmware.log") - d.cleanupVm(vm_dir) - return nil, log.Error("VMX: Unable to run VM:", vmx_path, 
err) + d.cleanupVm(vmDir) + return nil, log.Error("VMX: Unable to run VM:", vmxPath, err) } - log.Info("VMX: Allocate of VM completed:", vmx_path) + log.Info("VMX: Allocate of VM completed:", vmxPath) return &types.Resource{ - Identifier: vmx_path, - HwAddr: vm_hwaddr, + Identifier: vmxPath, + HwAddr: vmHwaddr, Authentication: def.Authentication, }, nil } @@ -243,7 +243,7 @@ func (d *Driver) Status(res *types.Resource) (string, error) { func (d *Driver) GetTask(name, options string) drivers.ResourceDriverTask { // Look for the specified task name var t drivers.ResourceDriverTask - for _, task := range d.tasks_list { + for _, task := range d.tasksList { if task.Name() == name { t = task.Clone() } @@ -264,29 +264,29 @@ func (d *Driver) Deallocate(res *types.Resource) error { if res == nil || res.Identifier == "" { return fmt.Errorf("VMX: Invalid resource: %v", res) } - vmx_path := res.Identifier - if len(vmx_path) == 0 { - return log.Error("VMX: Unable to find VM:", vmx_path) + vmxPath := res.Identifier + if len(vmxPath) == 0 { + return log.Error("VMX: Unable to find VM:", vmxPath) } // Sometimes it's stuck, so try to stop a bit more than usual - if _, _, err := runAndLogRetry(3, 60*time.Second, d.cfg.VmrunPath, "stop", vmx_path); err != nil { - log.Warn("VMX: Unable to soft stop the VM:", vmx_path, err) + if _, _, err := runAndLogRetry(3, 60*time.Second, d.cfg.VmrunPath, "stop", vmxPath); err != nil { + log.Warn("VMX: Unable to soft stop the VM:", vmxPath, err) // Ok, it doesn't want to stop, so stopping it hard - if _, _, err := runAndLogRetry(3, 60*time.Second, d.cfg.VmrunPath, "stop", vmx_path, "hard"); err != nil { - return log.Error("VMX: Unable to deallocate VM:", vmx_path, err) + if _, _, err := runAndLogRetry(3, 60*time.Second, d.cfg.VmrunPath, "stop", vmxPath, "hard"); err != nil { + return log.Error("VMX: Unable to deallocate VM:", vmxPath, err) } } // Delete VM - if _, _, err := runAndLogRetry(3, 30*time.Second, d.cfg.VmrunPath, "deleteVM", vmx_path); err 
!= nil { - return log.Error("VMX: Unable to delete VM:", vmx_path, err) + if _, _, err := runAndLogRetry(3, 30*time.Second, d.cfg.VmrunPath, "deleteVM", vmxPath); err != nil { + return log.Error("VMX: Unable to delete VM:", vmxPath, err) } // Cleaning the VM images too - d.cleanupVm(filepath.Dir(vmx_path)) + d.cleanupVm(filepath.Dir(vmxPath)) - log.Info("VMX: Deallocate of VM completed:", vmx_path) + log.Info("VMX: Deallocate of VM completed:", vmxPath) return nil } diff --git a/lib/drivers/vmx/options.go b/lib/drivers/vmx/options.go index 9aa537c..af7bf33 100644 --- a/lib/drivers/vmx/options.go +++ b/lib/drivers/vmx/options.go @@ -44,14 +44,14 @@ func (o *Options) Apply(options util.UnparsedJson) error { func (o *Options) Validate() error { // Check images - var img_err error + var imgErr error for index := range o.Images { if err := o.Images[index].Validate(); err != nil { - img_err = log.Error("VMX: Error during image validation:", err) + imgErr = log.Error("VMX: Error during image validation:", err) } } - if img_err != nil { - return img_err + if imgErr != nil { + return imgErr } return nil diff --git a/lib/drivers/vmx/util.go b/lib/drivers/vmx/util.go index f7afda4..0967508 100644 --- a/lib/drivers/vmx/util.go +++ b/lib/drivers/vmx/util.go @@ -33,31 +33,31 @@ import ( ) // Returns the total resources available for the node after alteration -func (d *Driver) getAvailResources() (avail_cpu, avail_ram uint) { +func (d *Driver) getAvailResources() (availCpu, availRam uint) { if d.cfg.CpuAlter < 0 { - avail_cpu = d.total_cpu - uint(-d.cfg.CpuAlter) + availCpu = d.totalCpu - uint(-d.cfg.CpuAlter) } else { - avail_cpu = d.total_cpu + uint(d.cfg.CpuAlter) + availCpu = d.totalCpu + uint(d.cfg.CpuAlter) } if d.cfg.RamAlter < 0 { - avail_ram = d.total_ram - uint(-d.cfg.RamAlter) + availRam = d.totalRam - uint(-d.cfg.RamAlter) } else { - avail_ram = d.total_ram + uint(d.cfg.RamAlter) + availRam = d.totalRam + uint(d.cfg.RamAlter) } return } // Load images and returns the 
target image path for cloning -func (d *Driver) loadImages(opts *Options, vm_images_dir string) (string, error) { - if err := os.MkdirAll(vm_images_dir, 0o755); err != nil { - return "", log.Error("VMX: Unable to create the VM images dir:", vm_images_dir, err) +func (d *Driver) loadImages(opts *Options, vmImagesDir string) (string, error) { + if err := os.MkdirAll(vmImagesDir, 0o755); err != nil { + return "", log.Error("VMX: Unable to create the VM images dir:", vmImagesDir, err) } - target_path := "" + targetPath := "" var wg sync.WaitGroup - for image_index, image := range opts.Images { + for imageIndex, image := range opts.Images { log.Info("VMX: Loading the required image:", image.Name, image.Version, image.Url) // Running the background routine to download, unpack and process the image @@ -71,16 +71,16 @@ func (d *Driver) loadImages(opts *Options, vm_images_dir string) (string, error) // Getting the image subdir name in the unpacked dir subdir := "" - image_unpacked := filepath.Join(d.cfg.ImagesPath, image.Name+"-"+image.Version) - items, err := os.ReadDir(image_unpacked) + imageUnpacked := filepath.Join(d.cfg.ImagesPath, image.Name+"-"+image.Version) + items, err := os.ReadDir(imageUnpacked) if err != nil { - return log.Error("VMX: Unable to read the unpacked directory:", image_unpacked, err) + return log.Error("VMX: Unable to read the unpacked directory:", imageUnpacked, err) } for _, f := range items { if strings.HasPrefix(f.Name(), image.Name) { if f.Type()&fs.ModeSymlink != 0 { // Potentially it can be a symlink (like used in local tests) - if _, err := os.Stat(filepath.Join(image_unpacked, f.Name())); err != nil { + if _, err := os.Stat(filepath.Join(imageUnpacked, f.Name())); err != nil { log.Warn("VMX: The image symlink is broken:", f.Name(), err) continue } @@ -90,46 +90,46 @@ func (d *Driver) loadImages(opts *Options, vm_images_dir string) (string, error) } } if subdir == "" { - return log.Errorf("VMX: Unpacked image '%s' has no subfolder '%s', 
only: %q", image_unpacked, image.Name, items) + return log.Errorf("VMX: Unpacked image '%s' has no subfolder '%s', only: %q", imageUnpacked, image.Name, items) } // The VMware clone operation modifies the image snapshots description so // we walk through the image files, link them to the workspace dir and copy // the files (except for vmdk bins) with path to the workspace images dir - root_dir := filepath.Join(image_unpacked, subdir) - out_dir := filepath.Join(vm_images_dir, subdir) + rootDir := filepath.Join(imageUnpacked, subdir) + outDir := filepath.Join(vmImagesDir, subdir) if index+1 == len(opts.Images) { // It's the last image in the list so the target one - target_path = filepath.Join(out_dir, image.Name+".vmx") + targetPath = filepath.Join(outDir, image.Name+".vmx") } - if err := os.MkdirAll(out_dir, 0o755); err != nil { - return log.Error("VMX: Unable to create the vm image dir:", out_dir, err) + if err := os.MkdirAll(outDir, 0o755); err != nil { + return log.Error("VMX: Unable to create the vm image dir:", outDir, err) } - tocopy_list, err := os.ReadDir(root_dir) + tocopyList, err := os.ReadDir(rootDir) if err != nil { - os.RemoveAll(out_dir) - return log.Error("VMX: Unable to list the image directory:", root_dir, err) + os.RemoveAll(outDir) + return log.Error("VMX: Unable to list the image directory:", rootDir, err) } - for _, entry := range tocopy_list { - in_path := filepath.Join(root_dir, entry.Name()) - out_path := filepath.Join(out_dir, entry.Name()) + for _, entry := range tocopyList { + inPath := filepath.Join(rootDir, entry.Name()) + outPath := filepath.Join(outDir, entry.Name()) // Check if the file is the big disk - if strings.HasSuffix(entry.Name(), ".vmdk") && util.FileStartsWith(in_path, []byte("# Disk DescriptorFile")) != nil { + if strings.HasSuffix(entry.Name(), ".vmdk") && util.FileStartsWith(inPath, []byte("# Disk DescriptorFile")) != nil { // Just link the disk image to the vm image dir - we will not modify it anyway - if err := 
os.Symlink(in_path, out_path); err != nil { - os.RemoveAll(out_dir) - return log.Error("VMX: Unable to symlink the image file:", in_path, out_path, err) + if err := os.Symlink(inPath, outPath); err != nil { + os.RemoveAll(outDir) + return log.Error("VMX: Unable to symlink the image file:", inPath, outPath, err) } continue } // Copy VM file in order to prevent the image modification - if err := util.FileCopy(in_path, out_path); err != nil { - os.RemoveAll(out_dir) - return log.Error("VMX: Unable to copy the image file:", in_path, out_path, err) + if err := util.FileCopy(inPath, outPath); err != nil { + os.RemoveAll(outDir) + return log.Error("VMX: Unable to copy the image file:", inPath, outPath, err) } // Deprecated functionality @@ -138,17 +138,17 @@ func (d *Driver) loadImages(opts *Options, vm_images_dir string) (string, error) // // Modify the vmsd file cloneOf0 to replace token - it requires absolute path if strings.HasSuffix(entry.Name(), ".vmsd") { - if err := util.FileReplaceToken(out_path, + if err := util.FileReplaceToken(outPath, false, false, false, - "", vm_images_dir, + "", vmImagesDir, ); err != nil { - os.RemoveAll(out_dir) + os.RemoveAll(outDir) return log.Error("VMX: Unable to replace full path token in vmsd:", image.Name, err) } } } return nil - }(image, image_index) + }(image, imageIndex) } log.Debug("VMX: Wait for all the background image processes to be done...") @@ -157,16 +157,16 @@ func (d *Driver) loadImages(opts *Options, vm_images_dir string) (string, error) log.Info("VMX: The images are processed.") // Check all the images are in place just by number of them - vm_images, _ := os.ReadDir(vm_images_dir) - if len(opts.Images) != len(vm_images) { + vmImages, _ := os.ReadDir(vmImagesDir) + if len(opts.Images) != len(vmImages) { return "", log.Error("VMX: The image processes gone wrong, please check log for the errors") } - return target_path, nil + return targetPath, nil } // Returns true if the VM with provided identifier is allocated -func 
(d *Driver) isAllocated(vmx_path string) bool { +func (d *Driver) isAllocated(vmxPath string) bool { // Probably it's better to store the current list in the memory and // update on fnotify or something like that... stdout, _, err := runAndLog(10*time.Second, d.cfg.VmrunPath, "list") @@ -175,7 +175,7 @@ func (d *Driver) isAllocated(vmx_path string) bool { } for _, line := range strings.Split(stdout, "\n") { - if vmx_path == line { + if vmxPath == line { return true } } @@ -184,27 +184,27 @@ func (d *Driver) isAllocated(vmx_path string) bool { } // Creates VMDK disks described by the disks map -func (d *Driver) disksCreate(vmx_path string, disks map[string]types.ResourcesDisk) error { +func (d *Driver) disksCreate(vmxPath string, disks map[string]types.ResourcesDisk) error { // Create disk files - var disk_paths []string - for d_name, disk := range disks { - disk_path := filepath.Join(filepath.Dir(vmx_path), d_name) + var diskPaths []string + for dName, disk := range disks { + diskPath := filepath.Join(filepath.Dir(vmxPath), dName) if disk.Reuse { - disk_path = filepath.Join(d.cfg.WorkspacePath, "disk-"+d_name, d_name) - if err := os.MkdirAll(filepath.Dir(disk_path), 0o755); err != nil { + diskPath = filepath.Join(d.cfg.WorkspacePath, "disk-"+dName, dName) + if err := os.MkdirAll(filepath.Dir(diskPath), 0o755); err != nil { return err } } - rel_path, err := filepath.Rel(filepath.Dir(vmx_path), disk_path+".vmdk") + relPath, err := filepath.Rel(filepath.Dir(vmxPath), diskPath+".vmdk") if err != nil { - log.Warn("VMX: Unable to get relative path for disk:", disk_path+".vmdk", err) - disk_paths = append(disk_paths, disk_path) + log.Warn("VMX: Unable to get relative path for disk:", diskPath+".vmdk", err) + diskPaths = append(diskPaths, diskPath) } else { - disk_paths = append(disk_paths, rel_path) + diskPaths = append(diskPaths, relPath) } - if _, err := os.Stat(disk_path + ".vmdk"); !os.IsNotExist(err) { + if _, err := os.Stat(diskPath + ".vmdk"); !os.IsNotExist(err) { 
continue } @@ -213,56 +213,56 @@ func (d *Driver) disksCreate(vmx_path string, disks map[string]types.ResourcesDi // TODO: Ensure failures doesn't leave the changes behind (like mounted disks or files) // Create virtual disk - dmg_path := disk_path + ".dmg" - var disk_type string + dmgPath := diskPath + ".dmg" + var diskType string switch disk.Type { case "hfs+": - disk_type = "HFS+" + diskType = "HFS+" case "fat32": - disk_type = "FAT32" + diskType = "FAT32" default: - disk_type = "ExFAT" + diskType = "ExFAT" } - label := d_name + label := dName if disk.Label != "" { label = disk.Label } - args := []string{"create", dmg_path, - "-fs", disk_type, + args := []string{"create", dmgPath, + "-fs", diskType, "-layout", "NONE", "-volname", label, "-size", fmt.Sprintf("%dm", disk.Size*1024), } if _, _, err := runAndLog(10*time.Minute, "/usr/bin/hdiutil", args...); err != nil { - return log.Error("VMX: Unable to create dmg disk:", dmg_path, err) + return log.Error("VMX: Unable to create dmg disk:", dmgPath, err) } - vm_name := strings.TrimSuffix(filepath.Base(vmx_path), ".vmx") - mount_point := filepath.Join("/Volumes", fmt.Sprintf("%s-%s", vm_name, d_name)) + vmName := strings.TrimSuffix(filepath.Base(vmxPath), ".vmx") + mountPoint := filepath.Join("/Volumes", fmt.Sprintf("%s-%s", vmName, dName)) // Attach & mount disk - stdout, _, err := runAndLog(10*time.Second, "/usr/bin/hdiutil", "attach", dmg_path, "-mountpoint", mount_point) + stdout, _, err := runAndLog(10*time.Second, "/usr/bin/hdiutil", "attach", dmgPath, "-mountpoint", mountPoint) if err != nil { - return log.Error("VMX: Unable to attach dmg disk:", dmg_path, mount_point, err) + return log.Error("VMX: Unable to attach dmg disk:", dmgPath, mountPoint, err) } // Get attached disk device - dev_path := strings.SplitN(stdout, " ", 2)[0] + devPath := strings.SplitN(stdout, " ", 2)[0] // Allow anyone to modify the disk content - if err := os.Chmod(mount_point, 0o777); err != nil { - return log.Error("VMX: Unable to 
change the disk access rights:", mount_point, err) + if err := os.Chmod(mountPoint, 0o777); err != nil { + return log.Error("VMX: Unable to change the disk access rights:", mountPoint, err) } // Umount disk (use diskutil to umount for sure) - _, _, err = runAndLog(10*time.Second, "/usr/sbin/diskutil", "umount", mount_point) + _, _, err = runAndLog(10*time.Second, "/usr/sbin/diskutil", "umount", mountPoint) if err != nil { - return log.Error("VMX: Unable to umount dmg disk:", mount_point, err) + return log.Error("VMX: Unable to umount dmg disk:", mountPoint, err) } // Detach disk - if _, _, err := runAndLog(10*time.Second, "/usr/bin/hdiutil", "detach", dev_path); err != nil { - return log.Error("VMX: Unable to detach dmg disk:", dev_path, err) + if _, _, err := runAndLog(10*time.Second, "/usr/bin/hdiutil", "detach", devPath); err != nil { + return log.Error("VMX: Unable to detach dmg disk:", devPath, err) } // Create vmdk by using the pregenerated vmdk template @@ -271,7 +271,7 @@ func (d *Driver) disksCreate(vmx_path string, disks map[string]types.ResourcesDi // mounted at the same time, so avoiding to use it by using template: // `Unable to create the source raw disk: Resource deadlock avoided` // To generate template: vmware-rawdiskCreator create /dev/disk2 1 ./disk_name lsilogic - vmdk_template := strings.Join([]string{ + vmdkTemplate := strings.Join([]string{ `# Disk DescriptorFile`, `version=1`, `encoding="UTF-8"`, @@ -283,7 +283,7 @@ func (d *Driver) disksCreate(vmx_path string, disks map[string]types.ResourcesDi // Format: http://sanbarrow.com/vmdk/disktypes.html // // size, offset - number in amount of sectors - fmt.Sprintf(`RW %d FLAT %q 0`, disk.Size*1024*1024*2, dmg_path), + fmt.Sprintf(`RW %d FLAT %q 0`, disk.Size*1024*1024*2, dmgPath), ``, `# The Disk Data Base`, `#DDB`, @@ -294,70 +294,70 @@ func (d *Driver) disksCreate(vmx_path string, disks map[string]types.ResourcesDi `ddb.virtualHWVersion = "14"`, }, "\n") - if err := 
os.WriteFile(disk_path+"_tmp.vmdk", []byte(vmdk_template), 0o640); err != nil { //nolint:gosec // G306 - return log.Error("VMX: Unable to place the template vmdk file:", disk_path+"_tmp.vmdk", err) + if err := os.WriteFile(diskPath+"_tmp.vmdk", []byte(vmdkTemplate), 0o640); err != nil { //nolint:gosec // G306 + return log.Error("VMX: Unable to place the template vmdk file:", diskPath+"_tmp.vmdk", err) } // Convert linked vmdk to standalone vmdk - if _, _, err := runAndLog(10*time.Minute, d.cfg.VdiskmanagerPath, "-r", disk_path+"_tmp.vmdk", "-t", "0", disk_path+".vmdk"); err != nil { - return log.Error("VMX: Unable to create vmdk disk:", disk_path+".vmdk", err) + if _, _, err := runAndLog(10*time.Minute, d.cfg.VdiskmanagerPath, "-r", diskPath+"_tmp.vmdk", "-t", "0", diskPath+".vmdk"); err != nil { + return log.Error("VMX: Unable to create vmdk disk:", diskPath+".vmdk", err) } // Remove temp files - for _, path := range []string{dmg_path, disk_path + "_tmp.vmdk"} { + for _, path := range []string{dmgPath, diskPath + "_tmp.vmdk"} { if err := os.Remove(path); err != nil { return log.Error("VMX: Unable to remove tmp disk files:", path, err) } } } - if len(disk_paths) == 0 { + if len(diskPaths) == 0 { return nil } // Connect disk files to vmx - var to_replace []string + var toReplace []string // Use SCSI adapter - to_replace = append(to_replace, + toReplace = append(toReplace, "sata1.present =", `sata1.present = "TRUE"`, ) - for i, disk_path := range disk_paths { + for i, diskPath := range diskPaths { prefix := fmt.Sprintf("sata1:%d", i) - to_replace = append(to_replace, + toReplace = append(toReplace, prefix+".present =", prefix+`.present = "TRUE"`, - prefix+".fileName =", fmt.Sprintf("%s.fileName = %q", prefix, disk_path), + prefix+".fileName =", fmt.Sprintf("%s.fileName = %q", prefix, diskPath), ) } - if err := util.FileReplaceToken(vmx_path, true, true, true, to_replace...); err != nil { - return log.Error("VMX: Unable to add disks to the VM configuration:", 
vmx_path, err) + if err := util.FileReplaceToken(vmxPath, true, true, true, toReplace...); err != nil { + return log.Error("VMX: Unable to add disks to the VM configuration:", vmxPath, err) } return nil } // Ensures the VM is not stale by monitoring the log -func (d *Driver) logMonitor(vm_id, vmx_path string) { +func (d *Driver) logMonitor(vmId, vmxPath string) { // Monitor the vmware.log file - log_path := filepath.Join(filepath.Dir(vmx_path), "vmware.log") - t, _ := tail.TailFile(log_path, tail.Config{Follow: true, Poll: true}) - log.Debug("VMX: Start monitoring of log:", vm_id, log_path) + logPath := filepath.Join(filepath.Dir(vmxPath), "vmware.log") + t, _ := tail.TailFile(logPath, tail.Config{Follow: true, Poll: true}) + log.Debug("VMX: Start monitoring of log:", vmId, logPath) for line := range t.Lines { - log.Debug("VMX:", vm_id, "vmware.log:", line) + log.Debug("VMX:", vmId, "vmware.log:", line) // Send reset if the VM is switched to 0 status if strings.Contains(line.Text, "Tools: Changing running status: 1 => 0") { - log.Warn("VMX: Resetting the stale VM", vm_id, vmx_path) + log.Warn("VMX: Resetting the stale VM", vmId, vmxPath) // We should not spend much time here, because we can miss // the file delete so running in a separated thread - go runAndLog(10*time.Second, d.cfg.VmrunPath, "reset", vmx_path) + go runAndLog(10*time.Second, d.cfg.VmrunPath, "reset", vmxPath) } } - log.Debug("VMX: Done monitoring of log:", vm_id, log_path) + log.Debug("VMX: Done monitoring of log:", vmId, logPath) } // Removes the entire directory for clean up purposes -func (d *Driver) cleanupVm(vm_dir string) error { - if err := os.RemoveAll(vm_dir); err != nil { - log.Warn("VMX: Unable to clean up the vm directory:", vm_dir, err) +func (d *Driver) cleanupVm(vmDir string) error { + if err := os.RemoveAll(vmDir); err != nil { + log.Warn("VMX: Unable to clean up the vm directory:", vmDir, err) return err } diff --git a/lib/fish/application.go b/lib/fish/application.go index 
2d5d945..e2385cc 100644 --- a/lib/fish/application.go +++ b/lib/fish/application.go @@ -25,13 +25,13 @@ import ( func (f *Fish) ApplicationFind(filter *string) (as []types.Application, err error) { db := f.db if filter != nil { - secured_filter, err := util.ExpressionSqlFilter(*filter) + securedFilter, err := util.ExpressionSqlFilter(*filter) if err != nil { log.Warn("Fish: SECURITY: weird SQL filter received:", err) // We do not fail here because we should not give attacker more information return as, nil } - db = db.Where(secured_filter) + db = db.Where(securedFilter) } err = db.Find(&as).Error return as, err @@ -81,8 +81,8 @@ func (f *Fish) ApplicationListGetStatusNew() (as []types.Application, err error) return as, err } -func (f *Fish) ApplicationIsAllocated(app_uid types.ApplicationUID) (err error) { - state, err := f.ApplicationStateGetByApplication(app_uid) +func (f *Fish) ApplicationIsAllocated(appUid types.ApplicationUID) (err error) { + state, err := f.ApplicationStateGetByApplication(appUid) if err != nil { return err } else if state.Status != types.ApplicationStatusALLOCATED { diff --git a/lib/fish/application_state.go b/lib/fish/application_state.go index 8354288..9e785bc 100644 --- a/lib/fish/application_state.go +++ b/lib/fish/application_state.go @@ -48,9 +48,9 @@ func (f *Fish) ApplicationStateGet(uid types.ApplicationStateUID) (as *types.App return as, err } -func (f *Fish) ApplicationStateGetByApplication(app_uid types.ApplicationUID) (as *types.ApplicationState, err error) { +func (f *Fish) ApplicationStateGetByApplication(appUid types.ApplicationUID) (as *types.ApplicationState, err error) { as = &types.ApplicationState{} - err = f.db.Where("application_uid = ?", app_uid).Order("created_at desc").First(as).Error + err = f.db.Where("application_uid = ?", appUid).Order("created_at desc").First(as).Error return as, err } diff --git a/lib/fish/application_task.go b/lib/fish/application_task.go index 141e09c..5868c7f 100644 --- 
a/lib/fish/application_task.go +++ b/lib/fish/application_task.go @@ -25,14 +25,14 @@ import ( func (f *Fish) ApplicationTaskFindByApplication(uid types.ApplicationUID, filter *string) (at []types.ApplicationTask, err error) { db := f.db.Where("application_uid = ?", uid) if filter != nil { - secured_filter, err := util.ExpressionSqlFilter(*filter) + securedFilter, err := util.ExpressionSqlFilter(*filter) if err != nil { log.Warn("Fish: SECURITY: weird SQL filter received:", err) // We do not fail here because we should not give attacker more information return at, nil } // Adding parentheses to be sure we're have `application_uid AND (filter)` - db = db.Where("(" + secured_filter + ")") + db = db.Where("(" + securedFilter + ")") } err = db.Find(&at).Error return at, err @@ -66,7 +66,7 @@ func (f *Fish) ApplicationTaskGet(uid types.ApplicationTaskUID) (at *types.Appli return at, err } -func (f *Fish) ApplicationTaskListByApplicationAndWhen(app_uid types.ApplicationUID, when types.ApplicationStatus) (at []types.ApplicationTask, err error) { - err = f.db.Where(`application_uid = ? AND "when" = ?`, app_uid, when).Order("created_at desc").Find(&at).Error +func (f *Fish) ApplicationTaskListByApplicationAndWhen(appUid types.ApplicationUID, when types.ApplicationStatus) (at []types.ApplicationTask, err error) { + err = f.db.Where(`application_uid = ? 
AND "when" = ?`, appUid, when).Order("created_at desc").Find(&at).Error return at, err } diff --git a/lib/fish/config.go b/lib/fish/config.go index 9189dc2..4af588b 100644 --- a/lib/fish/config.go +++ b/lib/fish/config.go @@ -55,12 +55,12 @@ type ConfigDriver struct { Cfg util.UnparsedJson `json:"cfg"` } -func (c *Config) ReadConfigFile(cfg_path string) error { +func (c *Config) ReadConfigFile(cfgPath string) error { c.initDefaults() - if cfg_path != "" { + if cfgPath != "" { // Open and parse - data, err := os.ReadFile(cfg_path) + data, err := os.ReadFile(cfgPath) if err != nil { return err } diff --git a/lib/fish/drivers.go b/lib/fish/drivers.go index f70a55e..d1ca78a 100644 --- a/lib/fish/drivers.go +++ b/lib/fish/drivers.go @@ -28,14 +28,14 @@ import ( _ "github.com/adobe/aquarium-fish/lib/drivers/test" ) -var drivers_instances map[string]drivers.ResourceDriver +var driversInstances map[string]drivers.ResourceDriver func (f *Fish) DriverGet(name string) drivers.ResourceDriver { - if drivers_instances == nil { + if driversInstances == nil { log.Error("Fish: Resource drivers are not initialized to request the driver instance:", name) return nil } - drv := drivers_instances[name] + drv := driversInstances[name] return drv } @@ -66,33 +66,33 @@ func (f *Fish) DriversSet() error { } } - drivers_instances = instances + driversInstances = instances return nil } func (f *Fish) DriversPrepare(configs []ConfigDriver) (errs []error) { - activated_drivers_instances := make(map[string]drivers.ResourceDriver) - for name, drv := range drivers_instances { + activatedDriversInstances := make(map[string]drivers.ResourceDriver) + for name, drv := range driversInstances { // Looking for the driver config - var json_cfg []byte + var jsonCfg []byte for _, cfg := range configs { if name == cfg.Name { - json_cfg = []byte(cfg.Cfg) + jsonCfg = []byte(cfg.Cfg) break } } - if err := drv.Prepare(json_cfg); err != nil { + if err := drv.Prepare(jsonCfg); err != nil { errs = append(errs, err) 
log.Warn("Fish: Resource driver prepare failed:", drv.Name(), err) } else { - activated_drivers_instances[name] = drv + activatedDriversInstances[name] = drv log.Info("Fish: Resource driver activated:", drv.Name()) } } - drivers_instances = activated_drivers_instances + driversInstances = activatedDriversInstances return errs } diff --git a/lib/fish/fish.go b/lib/fish/fish.go index b74e3e6..e8835d7 100644 --- a/lib/fish/fish.go +++ b/lib/fish/fish.go @@ -34,7 +34,7 @@ import ( "github.com/adobe/aquarium-fish/lib/util" ) -const ELECTION_ROUND_TIME = 30 +const ElectionRoundTime = 30 type Fish struct { db *gorm.DB @@ -44,26 +44,26 @@ type Fish struct { // Signal to stop the fish Quit chan os.Signal - running bool - maintenance bool - shutdown bool - shutdown_cancel chan bool - shutdown_delay time.Duration + running bool + maintenance bool + shutdown bool + shutdownCancel chan bool + shutdownDelay time.Duration - active_votes_mutex sync.Mutex - active_votes []*types.Vote + activeVotesMutex sync.Mutex + activeVotes []*types.Vote // Stores the currently executing Applications - applications_mutex sync.Mutex - applications []types.ApplicationUID + applicationsMutex sync.Mutex + applications []types.ApplicationUID // Used to temporary store the won Votes by Application create time - won_votes_mutex sync.Mutex - won_votes map[int64]types.Vote + wonVotesMutex sync.Mutex + wonVotes map[int64]types.Vote // Stores the current usage of the node resources - node_usage_mutex sync.Mutex // Is needed to protect node resources from concurrent allocations - node_usage types.Resources + nodeUsageMutex sync.Mutex // Is needed to protect node resources from concurrent allocations + nodeUsage types.Resources } func New(db *gorm.DB, cfg *Config) (*Fish, error) { @@ -76,7 +76,7 @@ func New(db *gorm.DB, cfg *Config) (*Fish, error) { } func (f *Fish) Init() error { - f.shutdown_cancel = make(chan bool) + f.shutdownCancel = make(chan bool) f.Quit = make(chan os.Signal, 1) signal.Notify(f.Quit, 
syscall.SIGINT, syscall.SIGQUIT, syscall.SIGTERM) @@ -97,7 +97,7 @@ func (f *Fish) Init() error { } // Init variables - f.won_votes = make(map[int64]types.Vote, 5) + f.wonVotes = make(map[int64]types.Vote, 5) // Create admin user and ignore errors if it's existing _, err := f.UserGet("admin") @@ -111,11 +111,11 @@ func (f *Fish) Init() error { } // Init node - create_node := false + createNode := false node, err := f.NodeGet(f.cfg.NodeName) if err != nil { log.Info("Fish: Create new node:", f.cfg.NodeName, f.cfg.NodeLocation) - create_node = true + createNode = true node = &types.Node{ Name: f.cfg.NodeName, @@ -136,16 +136,16 @@ func (f *Fish) Init() error { log.Info("Fish: Use existing node:", node.Name, node.LocationName) } - cert_path := f.cfg.TLSCrt - if !filepath.IsAbs(cert_path) { - cert_path = filepath.Join(f.cfg.Directory, cert_path) + certPath := f.cfg.TLSCrt + if !filepath.IsAbs(certPath) { + certPath = filepath.Join(f.cfg.Directory, certPath) } - if err := node.Init(f.cfg.NodeAddress, cert_path); err != nil { + if err := node.Init(f.cfg.NodeAddress, certPath); err != nil { return fmt.Errorf("Fish: Unable to init node: %v", err) } f.node = node - if create_node { + if createNode { if err = f.NodeCreate(f.node); err != nil { return fmt.Errorf("Fish: Unable to create node: %v", err) } @@ -200,10 +200,10 @@ func (f *Fish) Init() error { if err := f.ResourceDelete(res.UID); err != nil { log.Error("Fish: Unable to delete Resource of Application:", res.ApplicationUID, err) } - app_state := &types.ApplicationState{ApplicationUID: res.ApplicationUID, Status: types.ApplicationStatusERROR, + appState := &types.ApplicationState{ApplicationUID: res.ApplicationUID, Status: types.ApplicationStatusERROR, Description: "Found not cleaned up resource", } - f.ApplicationStateCreate(app_state) + f.ApplicationStateCreate(appState) } } @@ -243,21 +243,21 @@ func (f *Fish) GetLocationName() types.LocationName { } func (f *Fish) checkNewApplicationProcess() { - check_ticker := 
time.NewTicker(5 * time.Second) + checkTicker := time.NewTicker(5 * time.Second) for { if !f.running { break } // TODO: Here should be select with quit in case app is stopped to not wait next ticker - <-check_ticker.C + <-checkTicker.C { // Check new apps available for processing - new_apps, err := f.ApplicationListGetStatusNew() + newApps, err := f.ApplicationListGetStatusNew() if err != nil { log.Error("Fish: Unable to get NEW ApplicationState list:", err) continue } - for _, app := range new_apps { + for _, app := range newApps { // Check if Vote is already here if f.voteActive(app.UID) { continue @@ -265,7 +265,7 @@ func (f *Fish) checkNewApplicationProcess() { log.Info("Fish: NEW Application with no vote:", app.UID, app.CreatedAt) // Vote not exists in the active votes - running the process - f.active_votes_mutex.Lock() + f.activeVotesMutex.Lock() { // Check if it's already exist in the DB (if node was restarted during voting) vote, _ := f.VoteGetNodeApplication(f.node.UID, app.UID) @@ -274,32 +274,32 @@ func (f *Fish) checkNewApplicationProcess() { vote.ApplicationUID = app.UID vote.NodeUID = f.node.UID - f.active_votes = append(f.active_votes, vote) + f.activeVotes = append(f.activeVotes, vote) go f.voteProcessRound(vote) } - f.active_votes_mutex.Unlock() + f.activeVotesMutex.Unlock() } // Check the Applications ready to be allocated // It's needed to be single-threaded to have some order in allocation - FIFO principle, // who requested first should be processed first. 
- f.won_votes_mutex.Lock() + f.wonVotesMutex.Lock() { // We need to sort the won_votes by key which is time they was created - keys := make([]int64, 0, len(f.won_votes)) - for k := range f.won_votes { + keys := make([]int64, 0, len(f.wonVotes)) + for k := range f.wonVotes { keys = append(keys, k) } sort.Slice(keys, func(i, j int) bool { return keys[i] < keys[j] }) for _, k := range keys { - if err := f.executeApplication(f.won_votes[k]); err != nil { - log.Errorf("Fish: Can't execute Application %s: %v", f.won_votes[k].ApplicationUID, err) + if err := f.executeApplication(f.wonVotes[k]); err != nil { + log.Errorf("Fish: Can't execute Application %s: %v", f.wonVotes[k].ApplicationUID, err) } - delete(f.won_votes, k) + delete(f.wonVotes, k) } } - f.won_votes_mutex.Unlock() + f.wonVotesMutex.Unlock() } } } @@ -319,13 +319,13 @@ func (f *Fish) voteProcessRound(vote *types.Vote) error { } for { - start_time := time.Now() + startTime := time.Now() log.Infof("Fish: Starting Application %s election round %d", vote.ApplicationUID, vote.Round) // Determine answer for this round, it will try find the first possible definition to serve // We can't run multiple resources check at a time or together with // allocating application so using mutex here - f.node_usage_mutex.Lock() + f.nodeUsageMutex.Lock() vote.Available = -1 // Set "nope" answer by default in case all the definitions are not fit for i, def := range label.Definitions { if f.isNodeAvailableForDefinition(def) { @@ -333,7 +333,7 @@ func (f *Fish) voteProcessRound(vote *types.Vote) error { break } } - f.node_usage_mutex.Unlock() + f.nodeUsageMutex.Unlock() // Create vote if it's required if vote.UID == uuid.Nil { @@ -356,15 +356,15 @@ func (f *Fish) voteProcessRound(vote *types.Vote) error { if len(votes) == len(nodes) { // Ok, all nodes are voted so let's move to election // Check if there's yes answers - available_exists := false + availableExists := false for _, vote := range votes { if vote.Available >= 0 { - 
available_exists = true + availableExists = true break } } - if available_exists { + if availableExists { // Check if the winner is this node vote, err := f.VoteGetElectionWinner(vote.ApplicationUID, vote.Round) if err != nil { @@ -376,9 +376,9 @@ func (f *Fish) voteProcessRound(vote *types.Vote) error { if err != nil { return log.Error("Fish: Unable to get the Application:", vote.ApplicationUID, err) } - f.won_votes_mutex.Lock() - f.won_votes[app.CreatedAt.UnixMicro()] = *vote - f.won_votes_mutex.Unlock() + f.wonVotesMutex.Lock() + f.wonVotes[app.CreatedAt.UnixMicro()] = *vote + f.wonVotesMutex.Unlock() } else { log.Infof("Fish: I lose the election for Application %s to Node %s", vote.ApplicationUID, vote.NodeUID) } @@ -386,8 +386,8 @@ func (f *Fish) voteProcessRound(vote *types.Vote) error { // Wait till the next round for ELECTION_ROUND_TIME since round start t := time.Now() - to_sleep := start_time.Add(ELECTION_ROUND_TIME * time.Second).Sub(t) - time.Sleep(to_sleep) + toSleep := startTime.Add(ElectionRoundTime * time.Second).Sub(t) + time.Sleep(toSleep) // Check if the Application changed state s, err := f.ApplicationStateGetByApplication(vote.ApplicationUID) @@ -430,11 +430,11 @@ func (f *Fish) isNodeAvailableForDefinition(def types.LabelDefinition) bool { // Verify node filters because some workload can't be running on all the physical nodes // The node becomes fitting only when all the needed node filter patterns are matched if len(def.Resources.NodeFilter) > 0 { - needed_idents := def.Resources.NodeFilter - current_idents := f.cfg.NodeIdentifiers - for _, needed := range needed_idents { + neededIdents := def.Resources.NodeFilter + currentIdents := f.cfg.NodeIdentifiers + for _, needed := range neededIdents { found := false - for _, value := range current_idents { + for _, value := range currentIdents { // We're validating the pattern on error during label creation, so they should be ok if found, _ = path.Match(needed, value); found { break @@ -449,8 +449,8 
@@ func (f *Fish) isNodeAvailableForDefinition(def types.LabelDefinition) bool { // Here all the node filters matched the node identifiers // Check with the driver if it's possible to allocate the Application resource - node_usage := f.node_usage - if capacity := driver.AvailableCapacity(node_usage, def); capacity < 1 { + nodeUsage := f.nodeUsage + if capacity := driver.AvailableCapacity(nodeUsage, def); capacity < 1 { return false } @@ -459,17 +459,17 @@ func (f *Fish) isNodeAvailableForDefinition(def types.LabelDefinition) bool { func (f *Fish) executeApplication(vote types.Vote) error { // Check the application is executed already - f.applications_mutex.Lock() + f.applicationsMutex.Lock() { for _, uid := range f.applications { if uid == vote.ApplicationUID { // Seems the application is already executing - f.applications_mutex.Unlock() + f.applicationsMutex.Unlock() return nil } } } - f.applications_mutex.Unlock() + f.applicationsMutex.Unlock() // Check vote have available field >= 0 means it chose the label definition if vote.Available < 0 { @@ -477,226 +477,226 @@ func (f *Fish) executeApplication(vote types.Vote) error { } // Locking the node resources until the app will be allocated - f.node_usage_mutex.Lock() + f.nodeUsageMutex.Lock() app, err := f.ApplicationGet(vote.ApplicationUID) if err != nil { - f.node_usage_mutex.Unlock() + f.nodeUsageMutex.Unlock() return fmt.Errorf("Fish: Unable to get the Application: %v", err) } // Check current Application state - app_state, err := f.ApplicationStateGetByApplication(app.UID) + appState, err := f.ApplicationStateGetByApplication(app.UID) if err != nil { - f.node_usage_mutex.Unlock() + f.nodeUsageMutex.Unlock() return fmt.Errorf("Fish: Unable to get the Application state: %v", err) } // Get label with the definitions label, err := f.LabelGet(app.LabelUID) if err != nil { - f.node_usage_mutex.Unlock() + f.nodeUsageMutex.Unlock() return fmt.Errorf("Fish: Unable to find Label %s: %v", app.LabelUID, err) } // Extract 
the vote won Label Definition if len(label.Definitions) <= vote.Available { - f.node_usage_mutex.Unlock() + f.nodeUsageMutex.Unlock() return fmt.Errorf("Fish: ERROR: The voted Definition not exists in the Label %s: %v (App: %s)", app.LabelUID, vote.Available, app.UID) } - label_def := label.Definitions[vote.Available] + labelDef := label.Definitions[vote.Available] // The already running applications will not consume the additional resources - if app_state.Status == types.ApplicationStatusNEW { + if appState.Status == types.ApplicationStatusNEW { // In case there is multiple Applications won the election process on the same node it could // just have not enough resources, so skip it for now to allow the other Nodes to try again. - if !f.isNodeAvailableForDefinition(label_def) { + if !f.isNodeAvailableForDefinition(labelDef) { log.Warn("Fish: Not enough resources to execute the Application", app.UID) - f.node_usage_mutex.Unlock() + f.nodeUsageMutex.Unlock() return nil } } // Locate the required driver - driver := f.DriverGet(label_def.Driver) + driver := f.DriverGet(labelDef.Driver) if driver == nil { - f.node_usage_mutex.Unlock() - return fmt.Errorf("Fish: Unable to locate driver for the Application %s: %s", app.UID, label_def.Driver) + f.nodeUsageMutex.Unlock() + return fmt.Errorf("Fish: Unable to locate driver for the Application %s: %s", app.UID, labelDef.Driver) } // If the driver is not using the remote resources - we need to increase the counter if !driver.IsRemote() { - f.node_usage.Add(label_def.Resources) + f.nodeUsage.Add(labelDef.Resources) } // Unlocking the node resources to allow the other Applications allocation - f.node_usage_mutex.Unlock() + f.nodeUsageMutex.Unlock() // Adding the application to list - f.applications_mutex.Lock() + f.applicationsMutex.Lock() f.applications = append(f.applications, app.UID) - f.applications_mutex.Unlock() + f.applicationsMutex.Unlock() // The main application processing is executed on background because allocation 
could take a // while, after that the bg process will wait for application state change go func() { - log.Info("Fish: Start executing Application", app.UID, app_state.Status) + log.Info("Fish: Start executing Application", app.UID, appState.Status) - if app_state.Status == types.ApplicationStatusNEW { + if appState.Status == types.ApplicationStatusNEW { // Set Application state as ELECTED - app_state = &types.ApplicationState{ApplicationUID: app.UID, Status: types.ApplicationStatusELECTED, + appState = &types.ApplicationState{ApplicationUID: app.UID, Status: types.ApplicationStatusELECTED, Description: "Elected node: " + f.node.Name, } - err := f.ApplicationStateCreate(app_state) + err := f.ApplicationStateCreate(appState) if err != nil { log.Error("Fish: Unable to set Application state:", app.UID, err) - f.applications_mutex.Lock() + f.applicationsMutex.Lock() f.removeFromExecutingApplincations(app.UID) - f.applications_mutex.Unlock() + f.applicationsMutex.Unlock() return } } // Merge application and label metadata, in this exact order - var merged_metadata []byte + var mergedMetadata []byte var metadata map[string]any if err := json.Unmarshal([]byte(app.Metadata), &metadata); err != nil { log.Error("Fish: Unable to parse the Application metadata:", app.UID, err) - app_state = &types.ApplicationState{ApplicationUID: app.UID, Status: types.ApplicationStatusERROR, + appState = &types.ApplicationState{ApplicationUID: app.UID, Status: types.ApplicationStatusERROR, Description: fmt.Sprint("Unable to parse the app metadata:", err), } - f.ApplicationStateCreate(app_state) + f.ApplicationStateCreate(appState) } if err := json.Unmarshal([]byte(label.Metadata), &metadata); err != nil { log.Error("Fish: Unable to parse the Label metadata:", label.UID, err) - app_state = &types.ApplicationState{ApplicationUID: app.UID, Status: types.ApplicationStatusERROR, + appState = &types.ApplicationState{ApplicationUID: app.UID, Status: types.ApplicationStatusERROR, Description: 
fmt.Sprint("Unable to parse the label metadata:", err), } - f.ApplicationStateCreate(app_state) + f.ApplicationStateCreate(appState) } - if merged_metadata, err = json.Marshal(metadata); err != nil { + if mergedMetadata, err = json.Marshal(metadata); err != nil { log.Error("Fish: Unable to merge metadata:", label.UID, err) - app_state = &types.ApplicationState{ApplicationUID: app.UID, Status: types.ApplicationStatusERROR, + appState = &types.ApplicationState{ApplicationUID: app.UID, Status: types.ApplicationStatusERROR, Description: fmt.Sprint("Unable to merge metadata:", err), } - f.ApplicationStateCreate(app_state) + f.ApplicationStateCreate(appState) } // Get or create the new resource object res := &types.Resource{ ApplicationUID: app.UID, NodeUID: f.node.UID, - Metadata: util.UnparsedJson(merged_metadata), + Metadata: util.UnparsedJson(mergedMetadata), } - if app_state.Status == types.ApplicationStatusALLOCATED { + if appState.Status == types.ApplicationStatusALLOCATED { res, err = f.ResourceGetByApplication(app.UID) if err != nil { log.Error("Fish: Unable to get the allocated Resource for Application:", app.UID, err) - app_state = &types.ApplicationState{ApplicationUID: app.UID, Status: types.ApplicationStatusERROR, + appState = &types.ApplicationState{ApplicationUID: app.UID, Status: types.ApplicationStatusERROR, Description: fmt.Sprint("Unable to find the allocated resource:", err), } - f.ApplicationStateCreate(app_state) + f.ApplicationStateCreate(appState) } } // Allocate the resource - if app_state.Status == types.ApplicationStatusELECTED { + if appState.Status == types.ApplicationStatusELECTED { // Run the allocation log.Infof("Fish: Allocate the Application %s resource using driver: %s", app.UID, driver.Name()) - drv_res, err := driver.Allocate(label_def, metadata) + drvRes, err := driver.Allocate(labelDef, metadata) if err != nil { log.Error("Fish: Unable to allocate resource for the Application:", app.UID, err) - app_state = 
&types.ApplicationState{ApplicationUID: app.UID, Status: types.ApplicationStatusERROR, + appState = &types.ApplicationState{ApplicationUID: app.UID, Status: types.ApplicationStatusERROR, Description: fmt.Sprint("Driver allocate resource error:", err), } } else { - res.Identifier = drv_res.Identifier - res.HwAddr = drv_res.HwAddr - res.IpAddr = drv_res.IpAddr + res.Identifier = drvRes.Identifier + res.HwAddr = drvRes.HwAddr + res.IpAddr = drvRes.IpAddr res.LabelUID = label.UID res.DefinitionIndex = vote.Available - res.Authentication = drv_res.Authentication + res.Authentication = drvRes.Authentication err := f.ResourceCreate(res) if err != nil { log.Error("Fish: Unable to store Resource for Application:", app.UID, err) } - app_state = &types.ApplicationState{ApplicationUID: app.UID, Status: types.ApplicationStatusALLOCATED, + appState = &types.ApplicationState{ApplicationUID: app.UID, Status: types.ApplicationStatusALLOCATED, Description: "Driver allocated the resource", } log.Infof("Fish: Allocated Resource %q for the Application %s", app.UID, res.Identifier) } - f.ApplicationStateCreate(app_state) + f.ApplicationStateCreate(appState) } // Getting the resource lifetime to know how much time it will live - resource_lifetime, err := time.ParseDuration(label_def.Resources.Lifetime) - if label_def.Resources.Lifetime != "" && err != nil { + resourceLifetime, err := time.ParseDuration(labelDef.Resources.Lifetime) + if labelDef.Resources.Lifetime != "" && err != nil { log.Error("Fish: Can't parse the Lifetime from Label Definition:", label.UID, res.DefinitionIndex) } if err != nil { // Try to get default value from fish config - resource_lifetime, err = time.ParseDuration(f.cfg.DefaultResourceLifetime) + resourceLifetime, err = time.ParseDuration(f.cfg.DefaultResourceLifetime) if err != nil { // Not an error - in worst case the resource will just sit there but at least will // not ruin the workload execution log.Warn("Fish: Default Resource Lifetime is not set in fish 
config") } } - resource_timeout := res.CreatedAt.Add(resource_lifetime) - if app_state.Status == types.ApplicationStatusALLOCATED { - if resource_lifetime > 0 { - log.Infof("Fish: Resource of Application %s will be deallocated by timeout in %s (%s)", app.UID, resource_lifetime, resource_timeout) + resourceTimeout := res.CreatedAt.Add(resourceLifetime) + if appState.Status == types.ApplicationStatusALLOCATED { + if resourceLifetime > 0 { + log.Infof("Fish: Resource of Application %s will be deallocated by timeout in %s (%s)", app.UID, resourceLifetime, resourceTimeout) } else { log.Warn("Fish: Resource have no lifetime set and will live until deallocated by user:", app.UID) } } // Run the loop to wait for deallocate request - var deallocate_retry uint8 = 1 - for app_state.Status == types.ApplicationStatusALLOCATED { + var deallocateRetry uint8 = 1 + for appState.Status == types.ApplicationStatusALLOCATED { if !f.running { log.Info("Fish: Stopping the Application execution:", app.UID) return } - app_state, err = f.ApplicationStateGetByApplication(app.UID) + appState, err = f.ApplicationStateGetByApplication(app.UID) if err != nil { log.Error("Fish: Unable to get Status for Application:", app.UID, err) } // Check if it's life timeout for the resource - if resource_lifetime > 0 { + if resourceLifetime > 0 { // The time limit is set - so let's use resource create time and find out timeout - if resource_timeout.Before(time.Now()) { + if resourceTimeout.Before(time.Now()) { // Seems the timeout has come, so fish asks for application deallocate - app_state = &types.ApplicationState{ApplicationUID: app.UID, Status: types.ApplicationStatusDEALLOCATE, - Description: fmt.Sprint("Resource lifetime timeout reached:", resource_lifetime), + appState = &types.ApplicationState{ApplicationUID: app.UID, Status: types.ApplicationStatusDEALLOCATE, + Description: fmt.Sprint("Resource lifetime timeout reached:", resourceLifetime), } - f.ApplicationStateCreate(app_state) + 
f.ApplicationStateCreate(appState) } } // Execute the existing ApplicationTasks. It will be executed during ALLOCATED or prior // to executing deallocation by DEALLOCATE & RECALLED which right now is useful for // `snapshot` and `image` tasks. - f.executeApplicationTasks(driver, &label_def, res, app_state.Status) + f.executeApplicationTasks(driver, &labelDef, res, appState.Status) - if app_state.Status == types.ApplicationStatusDEALLOCATE || app_state.Status == types.ApplicationStatusRECALLED { + if appState.Status == types.ApplicationStatusDEALLOCATE || appState.Status == types.ApplicationStatusRECALLED { log.Info("Fish: Running Deallocate of the Application and Resource:", app.UID, res.Identifier) // Deallocating and destroy the resource if err := driver.Deallocate(res); err != nil { - log.Errorf("Fish: Unable to deallocate the Resource of Application: %s (try: %d): %v", app.UID, deallocate_retry, err) + log.Errorf("Fish: Unable to deallocate the Resource of Application: %s (try: %d): %v", app.UID, deallocateRetry, err) // Let's retry to deallocate the resource 10 times before give up - if deallocate_retry <= 10 { - deallocate_retry += 1 + if deallocateRetry <= 10 { + deallocateRetry += 1 time.Sleep(10 * time.Second) continue } - app_state = &types.ApplicationState{ApplicationUID: app.UID, Status: types.ApplicationStatusERROR, + appState = &types.ApplicationState{ApplicationUID: app.UID, Status: types.ApplicationStatusERROR, Description: fmt.Sprint("Driver deallocate resource error:", err), } } else { log.Info("Fish: Successful deallocation of the Application:", app.UID) - app_state = &types.ApplicationState{ApplicationUID: app.UID, Status: types.ApplicationStatusDEALLOCATED, + appState = &types.ApplicationState{ApplicationUID: app.UID, Status: types.ApplicationStatusDEALLOCATED, Description: "Driver deallocated the resource", } } @@ -705,35 +705,35 @@ func (f *Fish) executeApplication(vote types.Vote) error { if err := f.ResourceDelete(res.UID); err != nil { 
log.Error("Fish: Unable to delete Resource for Application:", app.UID, err) } - f.ApplicationStateCreate(app_state) + f.ApplicationStateCreate(appState) } else { time.Sleep(5 * time.Second) } } - f.applications_mutex.Lock() + f.applicationsMutex.Lock() { // Decrease the amout of running local apps if !driver.IsRemote() { - f.node_usage_mutex.Lock() - f.node_usage.Subtract(label_def.Resources) - f.node_usage_mutex.Unlock() + f.nodeUsageMutex.Lock() + f.nodeUsage.Subtract(labelDef.Resources) + f.nodeUsageMutex.Unlock() } // Clean the executing application f.removeFromExecutingApplincations(app.UID) } - f.applications_mutex.Unlock() + f.applicationsMutex.Unlock() - log.Info("Fish: Done executing Application", app.UID, app_state.Status) + log.Info("Fish: Done executing Application", app.UID, appState.Status) }() return nil } -func (f *Fish) executeApplicationTasks(drv drivers.ResourceDriver, def *types.LabelDefinition, res *types.Resource, app_status types.ApplicationStatus) error { +func (f *Fish) executeApplicationTasks(drv drivers.ResourceDriver, def *types.LabelDefinition, res *types.Resource, appStatus types.ApplicationStatus) error { // Execute the associated ApplicationTasks if there is some - tasks, err := f.ApplicationTaskListByApplicationAndWhen(res.ApplicationUID, app_status) + tasks, err := f.ApplicationTaskListByApplicationAndWhen(res.ApplicationUID, appStatus) if err != nil { return log.Error("Fish: Unable to get ApplicationTasks:", res.ApplicationUID, err) } @@ -764,9 +764,9 @@ func (f *Fish) executeApplicationTasks(drv drivers.ResourceDriver, def *types.La return nil } -func (f *Fish) removeFromExecutingApplincations(app_uid types.ApplicationUID) { +func (f *Fish) removeFromExecutingApplincations(appUid types.ApplicationUID) { for i, uid := range f.applications { - if uid != app_uid { + if uid != appUid { continue } f.applications[i] = f.applications[len(f.applications)-1] @@ -775,29 +775,29 @@ func (f *Fish) removeFromExecutingApplincations(app_uid 
types.ApplicationUID) { } } -func (f *Fish) voteActive(app_uid types.ApplicationUID) bool { - f.active_votes_mutex.Lock() - defer f.active_votes_mutex.Unlock() +func (f *Fish) voteActive(appUid types.ApplicationUID) bool { + f.activeVotesMutex.Lock() + defer f.activeVotesMutex.Unlock() - for _, vote := range f.active_votes { - if vote.ApplicationUID == app_uid { + for _, vote := range f.activeVotes { + if vote.ApplicationUID == appUid { return true } } return false } -func (f *Fish) voteActiveRemove(vote_uid types.VoteUID) { - f.active_votes_mutex.Lock() - defer f.active_votes_mutex.Unlock() - av := f.active_votes +func (f *Fish) voteActiveRemove(voteUid types.VoteUID) { + f.activeVotesMutex.Lock() + defer f.activeVotesMutex.Unlock() + av := f.activeVotes - for i, v := range f.active_votes { - if v.UID != vote_uid { + for i, v := range f.activeVotes { + if v.UID != voteUid { continue } av[i] = av[len(av)-1] - f.active_votes = av[:len(av)-1] + f.activeVotes = av[:len(av)-1] break } } @@ -822,7 +822,7 @@ func (f *Fish) ShutdownSet(value bool) { f.activateShutdown() } else { log.Info("Fish: Disabled shutdown mode") - f.shutdown_cancel <- true + f.shutdownCancel <- true } } @@ -831,49 +831,49 @@ func (f *Fish) ShutdownSet(value bool) { // Set of how much time to wait before executing the node shutdown operation func (f *Fish) ShutdownDelaySet(delay time.Duration) { - if f.shutdown_delay != delay { + if f.shutdownDelay != delay { log.Info("Fish: Shutdown delay is set to:", delay) } - f.shutdown_delay = delay + f.shutdownDelay = delay } func (f *Fish) activateShutdown() { - log.Infof("Fish: Enabled shutdown mode with maintenance: %v, delay: %v", f.maintenance, f.shutdown_delay) + log.Infof("Fish: Enabled shutdown mode with maintenance: %v, delay: %v", f.maintenance, f.shutdownDelay) - wait_apps := make(chan bool, 1) + waitApps := make(chan bool, 1) // Running the main shutdown routine go func() { - fire_shutdown := make(chan bool, 1) - delay_ticker_report := 
&time.Ticker{} - delay_timer := &time.Timer{} - var delay_end_time time.Time + fireShutdown := make(chan bool, 1) + delayTickerReport := &time.Ticker{} + delayTimer := &time.Timer{} + var delayEndTime time.Time for { select { - case <-f.shutdown_cancel: + case <-f.shutdownCancel: return - case <-wait_apps: + case <-waitApps: // Maintenance mode: All the apps are completed so it's safe to shutdown log.Debug("Fish: Shutdown: apps execution completed") // If the delay is set, then running timer to execute shutdown with delay - if f.shutdown_delay > 0 { - delay_end_time = time.Now().Add(f.shutdown_delay) - delay_ticker_report := time.NewTicker(30 * time.Second) - defer delay_ticker_report.Stop() - delay_timer = time.NewTimer(f.shutdown_delay) - defer delay_timer.Stop() + if f.shutdownDelay > 0 { + delayEndTime = time.Now().Add(f.shutdownDelay) + delayTickerReport := time.NewTicker(30 * time.Second) + defer delayTickerReport.Stop() + delayTimer = time.NewTimer(f.shutdownDelay) + defer delayTimer.Stop() } else { // No delay is needed, so shutdown now - fire_shutdown <- true + fireShutdown <- true } - case <-delay_ticker_report.C: - log.Infof("Fish: Shutdown: countdown: T-%v", time.Until(delay_end_time)) - case <-delay_timer.C: + case <-delayTickerReport.C: + log.Infof("Fish: Shutdown: countdown: T-%v", time.Until(delayEndTime)) + case <-delayTimer.C: // Delay time has passed, triggering shutdown - fire_shutdown <- true - case <-fire_shutdown: + fireShutdown <- true + case <-fireShutdown: log.Info("Fish: Shutdown sends quit signal to Fish") f.Quit <- syscall.SIGQUIT } @@ -883,29 +883,29 @@ func (f *Fish) activateShutdown() { if f.maintenance { // Running wait for unfinished apps go routine go func() { - ticker_check := time.NewTicker(2 * time.Second) - defer ticker_check.Stop() - ticker_report := time.NewTicker(30 * time.Second) - defer ticker_report.Stop() + tickerCheck := time.NewTicker(2 * time.Second) + defer tickerCheck.Stop() + tickerReport := time.NewTicker(30 * 
time.Second) + defer tickerReport.Stop() for { select { - case <-f.shutdown_cancel: + case <-f.shutdownCancel: return - case <-ticker_check.C: + case <-tickerCheck.C: // Need to make sure we're not executing any workload log.Debug("Fish: Shutdown: checking apps execution:", len(f.applications)) if len(f.applications) == 0 { - wait_apps <- true + waitApps <- true return } - case <-ticker_report.C: + case <-tickerReport.C: log.Info("Fish: Shutdown: waiting for running Applications:", len(f.applications)) } } }() } else { // Sending signal since no need to wait for the apps - wait_apps <- true + waitApps <- true } } diff --git a/lib/fish/label.go b/lib/fish/label.go index 03d6435..c27b2f6 100644 --- a/lib/fish/label.go +++ b/lib/fish/label.go @@ -24,13 +24,13 @@ import ( func (f *Fish) LabelFind(filter *string) (labels []types.Label, err error) { db := f.db if filter != nil { - secured_filter, err := util.ExpressionSqlFilter(*filter) + securedFilter, err := util.ExpressionSqlFilter(*filter) if err != nil { log.Warn("Fish: SECURITY: weird SQL filter received:", err) // We do not fail here because we should not give attacker more information return labels, nil } - db = db.Where(secured_filter) + db = db.Where(securedFilter) } err = db.Find(&labels).Error return labels, err diff --git a/lib/fish/location.go b/lib/fish/location.go index 854516f..03adc2d 100644 --- a/lib/fish/location.go +++ b/lib/fish/location.go @@ -23,13 +23,13 @@ import ( func (f *Fish) LocationFind(filter *string) (ls []types.Location, err error) { db := f.db if filter != nil { - secured_filter, err := util.ExpressionSqlFilter(*filter) + securedFilter, err := util.ExpressionSqlFilter(*filter) if err != nil { log.Warn("Fish: SECURITY: weird SQL filter received:", err) // We do not fail here because we should not give attacker more information return ls, nil } - db = db.Where(secured_filter) + db = db.Where(securedFilter) } err = db.Find(&ls).Error return ls, err diff --git a/lib/fish/node.go 
b/lib/fish/node.go index 9e3cefa..4ab9862 100644 --- a/lib/fish/node.go +++ b/lib/fish/node.go @@ -27,13 +27,13 @@ import ( func (f *Fish) NodeFind(filter *string) (ns []types.Node, err error) { db := f.db if filter != nil { - secured_filter, err := util.ExpressionSqlFilter(*filter) + securedFilter, err := util.ExpressionSqlFilter(*filter) if err != nil { log.Warn("Fish: SECURITY: weird SQL filter received:", err) // We do not fail here because we should not give attacker more information return ns, nil } - db = db.Where(secured_filter) + db = db.Where(securedFilter) } err = db.Find(&ns).Error return ns, err @@ -41,7 +41,7 @@ func (f *Fish) NodeFind(filter *string) (ns []types.Node, err error) { func (f *Fish) NodeActiveList() (ns []types.Node, err error) { // Only the nodes that pinged at least twice the delay time - t := time.Now().Add(-types.NODE_PING_DELAY * 2 * time.Second) + t := time.Now().Add(-types.NodePingDelay * 2 * time.Second) err = f.db.Where("updated_at > ?", t).Find(&ns).Error return ns, err } @@ -77,14 +77,14 @@ func (f *Fish) NodeGet(name string) (node *types.Node, err error) { func (f *Fish) pingProcess() { // In order to optimize network & database - update just UpdatedAt field - ping_ticker := time.NewTicker(types.NODE_PING_DELAY * time.Second) + pingTicker := time.NewTicker(types.NodePingDelay * time.Second) for { if !f.running { break } // TODO: Here should be select with quit in case app is stopped to not wait next ticker - <-ping_ticker.C + <-pingTicker.C log.Debug("Fish Node: ping") f.NodePing(f.node) } diff --git a/lib/fish/resource.go b/lib/fish/resource.go index 28509c0..c7a20d5 100644 --- a/lib/fish/resource.go +++ b/lib/fish/resource.go @@ -29,20 +29,20 @@ import ( func (f *Fish) ResourceFind(filter *string) (rs []types.Resource, err error) { db := f.db if filter != nil { - secured_filter, err := util.ExpressionSqlFilter(*filter) + securedFilter, err := util.ExpressionSqlFilter(*filter) if err != nil { log.Warn("Fish: SECURITY: weird 
SQL filter received:", err) // We do not fail here because we should not give attacker more information return rs, nil } - db = db.Where(secured_filter) + db = db.Where(securedFilter) } err = db.Find(&rs).Error return rs, err } -func (f *Fish) ResourceListNode(node_uid types.NodeUID) (rs []types.Resource, err error) { - err = f.db.Where("node_uid = ?", node_uid).Find(&rs).Error +func (f *Fish) ResourceListNode(nodeUid types.NodeUID) (rs []types.Resource, err error) { + err = f.db.Where("node_uid = ?", nodeUid).Find(&rs).Error return rs, err } @@ -119,7 +119,7 @@ func checkIPv4Address(network *net.IPNet, ip net.IP) bool { func isControlledNetwork(ip string) bool { // Relatively long process executed for each request, but gives us flexibility // TODO: Could be optimized to collect network data on start or periodically - ip_parsed := net.ParseIP(ip) + ipParsed := net.ParseIP(ip) ifaces, err := net.Interfaces() if err != nil { @@ -137,7 +137,7 @@ func isControlledNetwork(ip string) bool { for _, a := range addrs { switch v := a.(type) { case *net.IPNet: - if checkIPv4Address(v, ip_parsed) { + if checkIPv4Address(v, ipParsed) { return true } } @@ -168,13 +168,13 @@ func (f *Fish) ResourceGetByIP(ip string) (res *types.Resource, err error) { // Check by MAC and update IP if found // need to fix due to on mac arp can return just one digit - hw_addr := fixHwAddr(arp.Search(ip)) - if hw_addr == "" { + hwAddr := fixHwAddr(arp.Search(ip)) + if hwAddr == "" { return nil, gorm.ErrRecordNotFound } - err = f.db.Where("node_uid = ?", f.GetNodeUID()).Where("hw_addr = ?", hw_addr).First(res).Error + err = f.db.Where("node_uid = ?", f.GetNodeUID()).Where("hw_addr = ?", hwAddr).First(res).Error if err != nil { - return nil, fmt.Errorf("Fish: %s for HW address %s", err, hw_addr) + return nil, fmt.Errorf("Fish: %s for HW address %s", err, hwAddr) } // Check if the state is allocated to prevent old resources access @@ -189,9 +189,9 @@ func (f *Fish) ResourceGetByIP(ip string) (res 
*types.Resource, err error) { return res, err } -func (f *Fish) ResourceGetByApplication(app_uid types.ApplicationUID) (res *types.Resource, err error) { +func (f *Fish) ResourceGetByApplication(appUid types.ApplicationUID) (res *types.Resource, err error) { res = &types.Resource{} - err = f.db.Where("application_uid = ?", app_uid).First(res).Error + err = f.db.Where("application_uid = ?", appUid).First(res).Error return res, err } diff --git a/lib/fish/resource_access.go b/lib/fish/resource_access.go index dad4a84..a5ad39e 100644 --- a/lib/fish/resource_access.go +++ b/lib/fish/resource_access.go @@ -36,8 +36,8 @@ func (f *Fish) ResourceAccessCreate(r *types.ResourceAccess) error { return f.db.Create(r).Error } -func (f *Fish) ResourceAccessDeleteByResource(resource_uid types.ResourceUID) error { - ra := types.ResourceAccess{ResourceUID: resource_uid} +func (f *Fish) ResourceAccessDeleteByResource(resourceUid types.ResourceUID) error { + ra := types.ResourceAccess{ResourceUID: resourceUid} return f.db.Where(&ra).Delete(&ra).Error } diff --git a/lib/fish/servicemapping.go b/lib/fish/servicemapping.go index 85dd97c..63221b1 100644 --- a/lib/fish/servicemapping.go +++ b/lib/fish/servicemapping.go @@ -23,13 +23,13 @@ import ( func (f *Fish) ServiceMappingFind(filter *string) (sms []types.ServiceMapping, err error) { db := f.db if filter != nil { - secured_filter, err := util.ExpressionSqlFilter(*filter) + securedFilter, err := util.ExpressionSqlFilter(*filter) if err != nil { log.Warn("Fish: SECURITY: weird SQL filter received:", err) // We do not fail here because we should not give attacker more information return sms, nil } - db = db.Where(secured_filter) + db = db.Where(securedFilter) } err = db.Find(&sms).Error return sms, err diff --git a/lib/fish/user.go b/lib/fish/user.go index 27dfe60..771f023 100644 --- a/lib/fish/user.go +++ b/lib/fish/user.go @@ -24,13 +24,13 @@ import ( func (f *Fish) UserFind(filter *string) (us []types.User, err error) { db := f.db if 
filter != nil { - secured_filter, err := util.ExpressionSqlFilter(*filter) + securedFilter, err := util.ExpressionSqlFilter(*filter) if err != nil { log.Warn("Fish: SECURITY: weird SQL filter received:", err) // We do not fail here because we should not give attacker more information return us, nil } - db = db.Where(secured_filter) + db = db.Where(securedFilter) } err = db.Find(&us).Error return us, err @@ -65,7 +65,7 @@ func (f *Fish) UserAuth(name string, password string) *types.User { return nil } - if user.Hash.Algo != crypt.Argon2_Algo { + if user.Hash.Algo != crypt.Argon2Algo { log.Warnf("Please regenerate password for user %q to improve the API performance", name) } diff --git a/lib/fish/vote.go b/lib/fish/vote.go index 0c07c7b..ad2223f 100644 --- a/lib/fish/vote.go +++ b/lib/fish/vote.go @@ -26,13 +26,13 @@ import ( func (f *Fish) VoteFind(filter *string) (vs []types.Vote, err error) { db := f.db if filter != nil { - secured_filter, err := util.ExpressionSqlFilter(*filter) + securedFilter, err := util.ExpressionSqlFilter(*filter) if err != nil { log.Warn("Fish: SECURITY: weird SQL filter received:", err) // We do not fail here because we should not give attacker more information return vs, nil } - db = db.Where(secured_filter) + db = db.Where(securedFilter) } err = db.Find(&vs).Error return vs, err @@ -62,27 +62,27 @@ func (f *Fish) VoteGet(uid types.VoteUID) (v *types.Vote, err error) { return v, err } -func (f *Fish) VoteCurrentRoundGet(app_uid types.ApplicationUID) uint16 { +func (f *Fish) VoteCurrentRoundGet(appUid types.ApplicationUID) uint16 { var result types.Vote - f.db.Select("max(round) as round").Where("application_uid = ?", app_uid).First(&result) + f.db.Select("max(round) as round").Where("application_uid = ?", appUid).First(&result) return result.Round } -func (f *Fish) VoteListGetApplicationRound(app_uid types.ApplicationUID, round uint16) (vs []types.Vote, err error) { - err = f.db.Where("application_uid = ?", app_uid).Where("round = ?", 
round).Find(&vs).Error +func (f *Fish) VoteListGetApplicationRound(appUid types.ApplicationUID, round uint16) (vs []types.Vote, err error) { + err = f.db.Where("application_uid = ?", appUid).Where("round = ?", round).Find(&vs).Error return vs, err } -func (f *Fish) VoteGetElectionWinner(app_uid types.ApplicationUID, round uint16) (v *types.Vote, err error) { +func (f *Fish) VoteGetElectionWinner(appUid types.ApplicationUID, round uint16) (v *types.Vote, err error) { // Current rule is simple - sort everyone answered smallest available number and the first one wins v = &types.Vote{} - err = f.db.Where("application_uid = ?", app_uid).Where("round = ?", round).Where("available >= 0"). + err = f.db.Where("application_uid = ?", appUid).Where("round = ?", round).Where("available >= 0"). Order("available ASC").Order("created_at ASC").Order("rand ASC").First(&v).Error return v, err } -func (f *Fish) VoteGetNodeApplication(node_uid types.NodeUID, app_uid types.ApplicationUID) (v *types.Vote, err error) { +func (f *Fish) VoteGetNodeApplication(nodeUid types.NodeUID, appUid types.ApplicationUID) (v *types.Vote, err error) { v = &types.Vote{} - err = f.db.Where("application_uid = ?", app_uid).Where("node_uid = ?", node_uid).Order("round DESC").First(&v).Error + err = f.db.Where("application_uid = ?", appUid).Where("node_uid = ?", nodeUid).Order("round DESC").First(&v).Error return v, err } diff --git a/lib/openapi/api/api_v1.go b/lib/openapi/api/api_v1.go index 52389e8..bdae940 100644 --- a/lib/openapi/api/api_v1.go +++ b/lib/openapi/api/api_v1.go @@ -135,14 +135,14 @@ func (e *Processor) UserCreateUpdatePost(c echo.Context) error { password = crypt.RandString(64) } - mod_user, err := e.fish.UserGet(data.Name) + modUser, err := e.fish.UserGet(data.Name) if err == nil { // Updating existing user - mod_user.Hash = crypt.NewHash(password, nil) - e.fish.UserSave(mod_user) + modUser.Hash = crypt.NewHash(password, nil) + e.fish.UserSave(modUser) } else { // Creating new user - 
password, mod_user, err = e.fish.UserNew(data.Name, password) + password, modUser, err = e.fish.UserNew(data.Name, password) if err != nil { c.JSON(http.StatusBadRequest, H{"message": fmt.Sprintf("Unable to create user: %v", err)}) return fmt.Errorf("Unable to create user: %w", err) @@ -150,8 +150,8 @@ func (e *Processor) UserCreateUpdatePost(c echo.Context) error { } // Fill the output values - data.CreatedAt = mod_user.CreatedAt - data.UpdatedAt = mod_user.UpdatedAt + data.CreatedAt = modUser.CreatedAt + data.UpdatedAt = modUser.UpdatedAt if data.Password == "" { data.Password = password } else { @@ -247,14 +247,14 @@ func (e *Processor) ResourceAccessPut(c echo.Context, uid types.ResourceUID) err return fmt.Errorf("Only the owner & admin can assign service mapping to the Application") } - r_access := types.ResourceAccess{ + rAccess := types.ResourceAccess{ ResourceUID: res.UID, Username: user.Name, Password: crypt.RandString(64), } - e.fish.ResourceAccessCreate(&r_access) + e.fish.ResourceAccessCreate(&rAccess) - return c.JSON(http.StatusOK, r_access) + return c.JSON(http.StatusOK, rAccess) } func (e *Processor) ApplicationListGet(c echo.Context, params types.ApplicationListGetParams) error { @@ -271,13 +271,13 @@ func (e *Processor) ApplicationListGet(c echo.Context, params types.ApplicationL return fmt.Errorf("Not authentified") } if user.Name != "admin" { - var owner_out []types.Application + var ownerOut []types.Application for _, app := range out { if app.OwnerName == user.Name { - owner_out = append(owner_out, app) + ownerOut = append(ownerOut, app) } } - out = owner_out + out = ownerOut } return c.JSON(http.StatusOK, out) @@ -381,11 +381,11 @@ func (e *Processor) ApplicationStateGet(c echo.Context, uid types.ApplicationUID return c.JSON(http.StatusOK, out) } -func (e *Processor) ApplicationTaskListGet(c echo.Context, app_uid types.ApplicationUID, params types.ApplicationTaskListGetParams) error { - app, err := e.fish.ApplicationGet(app_uid) +func (e 
*Processor) ApplicationTaskListGet(c echo.Context, appUid types.ApplicationUID, params types.ApplicationTaskListGetParams) error { + app, err := e.fish.ApplicationGet(appUid) if err != nil { - c.JSON(http.StatusBadRequest, H{"message": fmt.Sprintf("Unable to find the Application: %s", app_uid)}) - return fmt.Errorf("Unable to find the Application: %s, %w", app_uid, err) + c.JSON(http.StatusBadRequest, H{"message": fmt.Sprintf("Unable to find the Application: %s", appUid)}) + return fmt.Errorf("Unable to find the Application: %s, %w", appUid, err) } // Only the owner of the application (or admin) could get the tasks @@ -399,7 +399,7 @@ func (e *Processor) ApplicationTaskListGet(c echo.Context, app_uid types.Applica return fmt.Errorf("Only the owner of Application & admin can get the Application Tasks") } - out, err := e.fish.ApplicationTaskFindByApplication(app_uid, params.Filter) + out, err := e.fish.ApplicationTaskFindByApplication(appUid, params.Filter) if err != nil { c.JSON(http.StatusInternalServerError, H{"message": fmt.Sprintf("Unable to get the Application Tasks list: %v", err)}) return fmt.Errorf("Unable to get the Application Tasks list: %w", err) @@ -408,11 +408,11 @@ func (e *Processor) ApplicationTaskListGet(c echo.Context, app_uid types.Applica return c.JSON(http.StatusOK, out) } -func (e *Processor) ApplicationTaskCreatePost(c echo.Context, app_uid types.ApplicationUID) error { - app, err := e.fish.ApplicationGet(app_uid) +func (e *Processor) ApplicationTaskCreatePost(c echo.Context, appUid types.ApplicationUID) error { + app, err := e.fish.ApplicationGet(appUid) if err != nil { - c.JSON(http.StatusBadRequest, H{"message": fmt.Sprintf("Unable to find the Application: %s", app_uid)}) - return fmt.Errorf("Unable to find the Application: %s, %w", app_uid, err) + c.JSON(http.StatusBadRequest, H{"message": fmt.Sprintf("Unable to find the Application: %s", appUid)}) + return fmt.Errorf("Unable to find the Application: %s, %w", appUid, err) } // Only the 
owner of the application (or admin) could create the tasks @@ -433,7 +433,7 @@ func (e *Processor) ApplicationTaskCreatePost(c echo.Context, app_uid types.Appl } // Set Application UID for the task forcefully to not allow creating tasks for the other Apps - data.ApplicationUID = app_uid + data.ApplicationUID = appUid if err := e.fish.ApplicationTaskCreate(&data); err != nil { c.JSON(http.StatusBadRequest, H{"message": fmt.Sprintf("Unable to create ApplicationTask: %v", err)}) @@ -443,11 +443,11 @@ func (e *Processor) ApplicationTaskCreatePost(c echo.Context, app_uid types.Appl return c.JSON(http.StatusOK, data) } -func (e *Processor) ApplicationTaskGet(c echo.Context, task_uid types.ApplicationTaskUID) error { - task, err := e.fish.ApplicationTaskGet(task_uid) +func (e *Processor) ApplicationTaskGet(c echo.Context, taskUid types.ApplicationTaskUID) error { + task, err := e.fish.ApplicationTaskGet(taskUid) if err != nil { - c.JSON(http.StatusBadRequest, H{"message": fmt.Sprintf("Unable to find the Application: %s", task_uid)}) - return fmt.Errorf("Unable to find the ApplicationTask: %s, %w", task_uid, err) + c.JSON(http.StatusBadRequest, H{"message": fmt.Sprintf("Unable to find the Application: %s", taskUid)}) + return fmt.Errorf("Unable to find the ApplicationTask: %s, %w", taskUid, err) } app, err := e.fish.ApplicationGet(task.ApplicationUID) @@ -498,12 +498,12 @@ func (e *Processor) ApplicationDeallocateGet(c echo.Context, uid types.Applicati return fmt.Errorf("Unable to deallocate the Application with status: %s", out.Status) } - new_status := types.ApplicationStatusDEALLOCATE + newStatus := types.ApplicationStatusDEALLOCATE if out.Status != types.ApplicationStatusALLOCATED { // The Application was not yet Allocated so just mark it as Recalled - new_status = types.ApplicationStatusRECALLED + newStatus = types.ApplicationStatusRECALLED } - as := &types.ApplicationState{ApplicationUID: uid, Status: new_status, + as := &types.ApplicationState{ApplicationUID: uid, 
Status: newStatus, Description: fmt.Sprintf("Requested by user %s", user.Name), } err = e.fish.ApplicationStateCreate(as) diff --git a/lib/openapi/meta/meta_v1.go b/lib/openapi/meta/meta_v1.go index 59f1efd..3bd96f8 100644 --- a/lib/openapi/meta/meta_v1.go +++ b/lib/openapi/meta/meta_v1.go @@ -81,8 +81,8 @@ func (e *Processor) Return(c echo.Context, code int, obj map[string]any) error { func (e *Processor) DataGetList(c echo.Context, params types.DataGetListParams) error { var metadata map[string]any - res_int := c.Get("resource") - res, ok := res_int.(*types.Resource) + resInt := c.Get("resource") + res, ok := resInt.(*types.Resource) if !ok { e.Return(c, http.StatusNotFound, H{"message": "No data found"}) return fmt.Errorf("Unable to get resource from context") diff --git a/lib/openapi/openapi.go b/lib/openapi/openapi.go index 81eca76..0d604a6 100644 --- a/lib/openapi/openapi.go +++ b/lib/openapi/openapi.go @@ -64,7 +64,7 @@ func (cb *YamlBinder) Bind(i any, c echo.Context) (err error) { return } -func Init(fish *fish.Fish, api_address, ca_path, cert_path, key_path string) (*http.Server, error) { +func Init(fish *fish.Fish, apiAddress, caPath, certPath, keyPath string) (*http.Server, error) { swagger, err := GetSwagger() if err != nil { return nil, fmt.Errorf("Fish OpenAPI: Error loading swagger spec: %w", err) @@ -89,15 +89,15 @@ func Init(fish *fish.Fish, api_address, ca_path, cert_path, key_path string) (*h api.NewV1Router(router, fish) // TODO: web UI router - ca_pool := x509.NewCertPool() - if ca_bytes, err := os.ReadFile(ca_path); err == nil { - ca_pool.AppendCertsFromPEM(ca_bytes) + caPool := x509.NewCertPool() + if caBytes, err := os.ReadFile(caPath); err == nil { + caPool.AppendCertsFromPEM(caBytes) } s := router.TLSServer - s.Addr = api_address + s.Addr = apiAddress s.TLSConfig = &tls.Config{ // #nosec G402 , keep the compatibility high since not public access ClientAuth: tls.RequestClientCert, // Need for the client certificate auth - ClientCAs: 
ca_pool, // Verify client certificate with the cluster CA + ClientCAs: caPool, // Verify client certificate with the cluster CA } errChan := make(chan error) go func() { @@ -115,7 +115,7 @@ func Init(fish *fish.Fish, api_address, ca_path, cert_path, key_path string) (*h defer router.TLSListener.Close() - if err := s.ServeTLS(router.TLSListener, cert_path, key_path); err != http.ErrServerClosed { + if err := s.ServeTLS(router.TLSListener, certPath, keyPath); err != http.ErrServerClosed { errChan <- err log.Error("API: Unable to start listener:", err) } diff --git a/lib/openapi/types/node.go b/lib/openapi/types/node.go index 6957147..b632e3b 100644 --- a/lib/openapi/types/node.go +++ b/lib/openapi/types/node.go @@ -20,26 +20,26 @@ import ( "os" ) -const NODE_PING_DELAY = 10 +const NodePingDelay = 10 var ErrNodePingDuplication = fmt.Errorf("Fish Node: Unable to join the Aquarium cluster due to " + "the node with the same name pinged the cluster less then 2xNODE_PING_DELAY time ago") -func (n *Node) Init(node_address, cert_path string) error { +func (n *Node) Init(nodeAddress, certPath string) error { // Set the node external address - n.Address = node_address + n.Address = nodeAddress // Read certificate's pubkey to put or compare - cert_bytes, err := os.ReadFile(cert_path) + certBytes, err := os.ReadFile(certPath) if err != nil { return err } - block, _ := pem.Decode(cert_bytes) + block, _ := pem.Decode(certBytes) cert, err := x509.ParseCertificate(block.Bytes) if err != nil { return err } - pubkey_der, err := x509.MarshalPKIXPublicKey(cert.PublicKey) + pubkeyDer, err := x509.MarshalPKIXPublicKey(cert.PublicKey) if err != nil { return err } @@ -48,10 +48,10 @@ func (n *Node) Init(node_address, cert_path string) error { // maybe later the process of key switch will be implemented if n.Pubkey == nil { // Set the pubkey once - n.Pubkey = &pubkey_der + n.Pubkey = &pubkeyDer } else { // Validate the existing pubkey - if !bytes.Equal(*n.Pubkey, pubkey_der) { + if 
!bytes.Equal(*n.Pubkey, pubkeyDer) { return fmt.Errorf("Fish Node: The pubkey was changed for Node, that's not supported") } } diff --git a/lib/openapi/types/resources.go b/lib/openapi/types/resources.go index e39d4d1..cce401a 100644 --- a/lib/openapi/types/resources.go +++ b/lib/openapi/types/resources.go @@ -49,7 +49,7 @@ func (r Resources) Value() (driver.Value, error) { return json.Marshal(r) } -func (r *Resources) Validate(disk_types []string, check_net bool) error { +func (r *Resources) Validate(diskTypes []string, checkNet bool) error { // Check resources if r.Cpu < 1 { return fmt.Errorf("Resources: Number of CPU cores is less then 1") @@ -61,8 +61,8 @@ func (r *Resources) Validate(disk_types []string, check_net bool) error { if name == "" { return fmt.Errorf("Resources: Disk name can't be empty") } - if len(disk_types) > 0 && !util.Contains(disk_types, disk.Type) { - return fmt.Errorf("Resources: Type of disk must be one of: %+q", disk_types) + if len(diskTypes) > 0 && !util.Contains(diskTypes, disk.Type) { + return fmt.Errorf("Resources: Type of disk must be one of: %+q", diskTypes) } if disk.Size < 1 { return fmt.Errorf("Resources: Size of the disk can't be less than 1GB") @@ -77,7 +77,7 @@ func (r *Resources) Validate(disk_types []string, check_net bool) error { } } } - if check_net && r.Network != "" && r.Network != "nat" { + if checkNet && r.Network != "" && r.Network != "nat" { return fmt.Errorf("Resources: The network configuration must be either '' (empty for hostonly) or 'nat'") } @@ -110,9 +110,9 @@ func (r *Resources) Subtract(res Resources) (err error) { r.Cpu -= res.Cpu } if r.Ram < res.Ram { - mem_err := fmt.Errorf("Resources: Unable to subtract more RAM than we have: %d < %d", r.Ram, res.Ram) + memErr := fmt.Errorf("Resources: Unable to subtract more RAM than we have: %d < %d", r.Ram, res.Ram) if err != nil { - err = fmt.Errorf("%v, %v", err, mem_err) + err = fmt.Errorf("%v, %v", err, memErr) } r.Ram = 0 } else { diff --git 
a/lib/proxy_socks/proxy.go b/lib/proxy_socks/proxy.go index 5138d4b..f1bcd97 100644 --- a/lib/proxy_socks/proxy.go +++ b/lib/proxy_socks/proxy.go @@ -49,16 +49,16 @@ func (p *ProxyAccess) Allow(ctx context.Context, req *socks5.Request) (context.C if dest == "" { dest = req.DestAddr.IP.String() } - over_dest := p.fish.ResourceServiceMapping(res, dest) - if over_dest == "" { + overDest := p.fish.ResourceServiceMapping(res, dest) + if overDest == "" { log.Warn("Proxy: Denied proxy from", req.RemoteAddr, "to", req.DestAddr) return ctx, false } // Resolve destination address if it's not an IP - req.DestAddr.IP = net.ParseIP(over_dest) + req.DestAddr.IP = net.ParseIP(overDest) if req.DestAddr.IP == nil { - req.DestAddr.FQDN = over_dest + req.DestAddr.FQDN = overDest addr, err := net.ResolveIPAddr("ip", req.DestAddr.FQDN) if err != nil { return ctx, false diff --git a/lib/proxy_ssh/proxy.go b/lib/proxy_ssh/proxy.go index 18187b6..cf822ae 100644 --- a/lib/proxy_ssh/proxy.go +++ b/lib/proxy_ssh/proxy.go @@ -94,11 +94,11 @@ func (p *ProxyAccess) serveConnection(conn net.Conn, serverConfig *ssh.ServerCon } // Go find the resource via its UID. - session_record, ok := value.(SessionRecord) + sessionRecord, ok := value.(SessionRecord) if !ok { return log.Errorf("Critical error retrieving session record (invalid type conversion).") } - resource, err := p.fish.ResourceGet(session_record.ResourceAccessor.ResourceUID) + resource, err := p.fish.ResourceGet(sessionRecord.ResourceAccessor.ResourceUID) if err != nil { return log.Errorf("Unable to retrieve resource: %v", err) } @@ -260,17 +260,17 @@ func (p *ProxyAccess) passwordCallback(conn ssh.ConnMetadata, pass []byte) (*ssh return nil, fmt.Errorf("invalid access") } -func Init(fish *fish.Fish, id_rsa_path string, address string) error { +func Init(fish *fish.Fish, idRsaPath string, address string) error { // First, try and read the file if it exists already. 
Otherwise, it is the // first execution, generate the private / public keys. The SSH server // requires at least one identity loaded to run. - privateBytes, err := os.ReadFile(id_rsa_path) + privateBytes, err := os.ReadFile(idRsaPath) if err != nil { // If it cannot be loaded, this is the first execution, generate it. - log.Infof("SSH Proxy: could not load %q, generating now.", id_rsa_path) + log.Infof("SSH Proxy: could not load %q, generating now.", idRsaPath) rsaKey, err := rsa.GenerateKey(rand.Reader, 4096) if err != nil { - return fmt.Errorf("proxy_ssh: could not generate private key %q: %w", id_rsa_path, err) + return fmt.Errorf("proxy_ssh: could not generate private key %q: %w", idRsaPath, err) } pemKey := pem.EncodeToMemory( &pem.Block{ @@ -279,14 +279,14 @@ func Init(fish *fish.Fish, id_rsa_path string, address string) error { }, ) // Write out the new key file and load into `privateBytes` again. - if err := os.WriteFile(id_rsa_path, pemKey, 0600); err != nil { - return fmt.Errorf("proxy_ssh: could not write %q: %w", id_rsa_path, err) + if err := os.WriteFile(idRsaPath, pemKey, 0600); err != nil { + return fmt.Errorf("proxy_ssh: could not write %q: %w", idRsaPath, err) } - privateBytes, err = os.ReadFile(id_rsa_path) + privateBytes, err = os.ReadFile(idRsaPath) if err != nil { return fmt.Errorf( "proxy_ssh: failed to load private key %q after generating: %w", - id_rsa_path, + idRsaPath, err, ) } @@ -297,13 +297,13 @@ func Init(fish *fish.Fish, id_rsa_path string, address string) error { return fmt.Errorf("proxy_ssh: failed to parse private key: %w", err) } - ssh_proxy := ProxyAccess{fish: fish} - ssh_proxy.serverConfig = &ssh.ServerConfig{ - PasswordCallback: ssh_proxy.passwordCallback, + sshProxy := ProxyAccess{fish: fish} + sshProxy.serverConfig = &ssh.ServerConfig{ + PasswordCallback: sshProxy.passwordCallback, } - ssh_proxy.serverConfig.AddHostKey(private) + sshProxy.serverConfig.AddHostKey(private) - go ssh_proxy.listenAndServe(address) + go 
sshProxy.listenAndServe(address) return nil } diff --git a/lib/util/dot_serialize.go b/lib/util/dot_serialize.go index 1eaf0da..0512ab5 100644 --- a/lib/util/dot_serialize.go +++ b/lib/util/dot_serialize.go @@ -24,12 +24,12 @@ func DotSerialize(prefix string, in any) map[string]string { v := reflect.ValueOf(in) if v.Kind() == reflect.Map { for _, k := range v.MapKeys() { - prefix_key := fmt.Sprintf("%v", k.Interface()) + prefixKey := fmt.Sprintf("%v", k.Interface()) if len(prefix) > 0 { - prefix_key = prefix + "." + prefix_key + prefixKey = prefix + "." + prefixKey } - int_out := DotSerialize(prefix_key, v.MapIndex(k).Interface()) - for key, val := range int_out { + intOut := DotSerialize(prefixKey, v.MapIndex(k).Interface()) + for key, val := range intOut { out[key] = val } } diff --git a/lib/util/expression_sql_filter_test.go b/lib/util/expression_sql_filter_test.go index 3b85eda..57005e4 100644 --- a/lib/util/expression_sql_filter_test.go +++ b/lib/util/expression_sql_filter_test.go @@ -18,7 +18,7 @@ import ( ) var ( - TEST_SQL_EXPRESSION_INJECTIONS = map[string]string{ + TestSqlExpressionInjections = map[string]string{ ``: ``, `1=1`: `1 = 1`, `id = 3; DROP users`: `"id" = 3`, @@ -30,7 +30,7 @@ var ( ) func Test_expression_sql_filter_where_injections(t *testing.T) { - for sql, result := range TEST_SQL_EXPRESSION_INJECTIONS { + for sql, result := range TestSqlExpressionInjections { t.Run(fmt.Sprintf("Testing `%s`", sql), func(t *testing.T) { out, err := ExpressionSqlFilter(sql) if out != result { diff --git a/lib/util/file_replace_block.go b/lib/util/file_replace_block.go index aa997d9..6f9718d 100644 --- a/lib/util/file_replace_block.go +++ b/lib/util/file_replace_block.go @@ -21,50 +21,50 @@ import ( "strings" ) -func FileReplaceBlock(path, block_from, block_to string, lines ...string) error { +func FileReplaceBlock(path, blockFrom, blockTo string, lines ...string) error { // Open input file - in_f, err := os.OpenFile(path, os.O_RDONLY, 0o644) + inF, err := 
os.OpenFile(path, os.O_RDONLY, 0o644) if err != nil { return err } - defer in_f.Close() + defer inF.Close() // Check it's not a dir - if info, err := in_f.Stat(); err == nil && info.IsDir() { + if info, err := inF.Stat(); err == nil && info.IsDir() { return fmt.Errorf("Util: Unable to replace block in directory") } // Open output file - out_f, err := os.CreateTemp(filepath.Dir(path), "tmp") + outF, err := os.CreateTemp(filepath.Dir(path), "tmp") if err != nil { return err } - defer out_f.Close() + defer outF.Close() // Replace while copying - sc := bufio.NewScanner(in_f) - found_from := false + sc := bufio.NewScanner(inF) + foundFrom := false replaced := false for sc.Scan() { line := sc.Text() if replaced { - if _, err := io.WriteString(out_f, line+"\n"); err != nil { + if _, err := io.WriteString(outF, line+"\n"); err != nil { return err } continue } - if !found_from { - if strings.Contains(line, block_from) { - found_from = true + if !foundFrom { + if strings.Contains(line, blockFrom) { + foundFrom = true continue } - if _, err := io.WriteString(out_f, line+"\n"); err != nil { + if _, err := io.WriteString(outF, line+"\n"); err != nil { return err } } else { - if strings.Contains(line, block_to) { + if strings.Contains(line, blockTo) { for _, l := range lines { - if _, err := io.WriteString(out_f, l+"\n"); err != nil { + if _, err := io.WriteString(outF, l+"\n"); err != nil { return err } } @@ -77,26 +77,26 @@ func FileReplaceBlock(path, block_from, block_to string, lines ...string) error } // Add in the end if was not replaced - if found_from && !replaced { + if foundFrom && !replaced { for _, l := range lines { - if _, err := io.WriteString(out_f, l+"\n"); err != nil { + if _, err := io.WriteString(outF, l+"\n"); err != nil { return err } } } // Close the out file - if err := out_f.Close(); err != nil { + if err := outF.Close(); err != nil { return err } // Close the input file - if err := in_f.Close(); err != nil { + if err := inF.Close(); err != nil { return 
err } // Replace input file with out file - if err := os.Rename(out_f.Name(), path); err != nil { + if err := os.Rename(outF.Name(), path); err != nil { return err } diff --git a/lib/util/file_replace_token.go b/lib/util/file_replace_token.go index 405eb07..68a56a1 100644 --- a/lib/util/file_replace_token.go +++ b/lib/util/file_replace_token.go @@ -21,32 +21,32 @@ import ( "strings" ) -func FileReplaceToken(path string, full_line, add, anycase bool, token_values ...string) error { +func FileReplaceToken(path string, fullLine, add, anycase bool, tokenValues ...string) error { // Open input file - in_f, err := os.OpenFile(path, os.O_RDONLY, 0o644) + inF, err := os.OpenFile(path, os.O_RDONLY, 0o644) if err != nil { return err } - defer in_f.Close() + defer inF.Close() // Check it's not a dir - if info, err := in_f.Stat(); err == nil && info.IsDir() { + if info, err := inF.Stat(); err == nil && info.IsDir() { return fmt.Errorf("Util: Unable to replace token in directory") } // Open output file - out_f, err := os.CreateTemp(filepath.Dir(path), "tmp") + outF, err := os.CreateTemp(filepath.Dir(path), "tmp") if err != nil { return err } - defer out_f.Close() + defer outF.Close() var tokens []string var values []string // Walking through the list of tokens to split them into pairs // 0 - key, 1 - value - for i, tv := range token_values { + for i, tv := range tokenValues { if i%2 == 0 { if anycase { tokens = append(tokens, strings.ToLower(tv)) @@ -61,17 +61,17 @@ func FileReplaceToken(path string, full_line, add, anycase bool, token_values .. 
replaced := make([]bool, len(values)) // Replace while copying - sc := bufio.NewScanner(in_f) + sc := bufio.NewScanner(inF) for sc.Scan() { line := sc.Text() - comp_line := line + compLine := line if anycase { - comp_line = strings.ToLower(line) + compLine = strings.ToLower(line) } for i, value := range values { - if strings.Contains(comp_line, tokens[i]) { + if strings.Contains(compLine, tokens[i]) { replaced[i] = true - if full_line { + if fullLine { line = value break // No need to check the other tokens } else { @@ -79,12 +79,12 @@ func FileReplaceToken(path string, full_line, add, anycase bool, token_values .. // We're not using RE because it's hard to predict the token // and escape it to compile the proper regular expression // so instead we using just regular replace by position of the token - idx := strings.Index(comp_line, tokens[i]) + idx := strings.Index(compLine, tokens[i]) for idx != -1 { // To support unicode use runes line = string([]rune(line)[0:idx]) + value + string([]rune(line)[idx+len(tokens[i]):len(line)]) - comp_line = strings.ToLower(line) - idx = strings.Index(comp_line, tokens[i]) + compLine = strings.ToLower(line) + idx = strings.Index(compLine, tokens[i]) } } else { line = strings.ReplaceAll(line, tokens[i], value) @@ -93,7 +93,7 @@ func FileReplaceToken(path string, full_line, add, anycase bool, token_values .. } } // Probably not the best way to assume there was just \n - if _, err := io.WriteString(out_f, line+"\n"); err != nil { + if _, err := io.WriteString(outF, line+"\n"); err != nil { return err } } @@ -105,7 +105,7 @@ func FileReplaceToken(path string, full_line, add, anycase bool, token_values .. if add { for i, value := range values { if !replaced[i] { - if _, err := io.WriteString(out_f, value+"\n"); err != nil { + if _, err := io.WriteString(outF, value+"\n"); err != nil { return err } } @@ -113,17 +113,17 @@ func FileReplaceToken(path string, full_line, add, anycase bool, token_values .. 
} // Close the out file - if err := out_f.Close(); err != nil { + if err := outF.Close(); err != nil { return err } // Close the input file - if err := in_f.Close(); err != nil { + if err := inF.Close(); err != nil { return err } // Replace input file with out file - if err := os.Rename(out_f.Name(), path); err != nil { + if err := os.Rename(outF.Name(), path); err != nil { return err } diff --git a/lib/util/file_replace_token_test.go b/lib/util/file_replace_token_test.go index ebe1470..78bc5cb 100644 --- a/lib/util/file_replace_token_test.go +++ b/lib/util/file_replace_token_test.go @@ -20,234 +20,234 @@ import ( ) func Test_file_replace_token_simple_proceed(t *testing.T) { - tmp_file := path.Join(t.TempDir(), "test.txt") + tmpFile := path.Join(t.TempDir(), "test.txt") - in_data := []byte("" + + inData := []byte("" + "test1 test2 test3\n" + "test4 test6\n" + "test7 test8 test9\n") - out_data := []byte("" + + outData := []byte("" + "test1 test2 test3\n" + "test4 test5 test6\n" + "test7 test8 test9\n") - os.WriteFile(tmp_file, in_data, 0o644) + os.WriteFile(tmpFile, inData, 0o644) - FileReplaceToken(tmp_file, + FileReplaceToken(tmpFile, false, false, false, "", "test5", ) - body, err := os.ReadFile(tmp_file) + body, err := os.ReadFile(tmpFile) - if err != nil || !bytes.Equal(body, out_data) { - t.Fatalf(`FileReplaceToken("", "test5") = %q, %v, want: %q, error`, body, err, out_data) + if err != nil || !bytes.Equal(body, outData) { + t.Fatalf(`FileReplaceToken("", "test5") = %q, %v, want: %q, error`, body, err, outData) } } func Test_file_replace_token_simple_skip_uppercase_src(t *testing.T) { - tmp_file := path.Join(t.TempDir(), "test.txt") + tmpFile := path.Join(t.TempDir(), "test.txt") - in_data := []byte("" + + inData := []byte("" + "test1 test2 test3\n" + "test4 test6\n" + "test7 test8 test9\n") - out_data := []byte("" + + outData := []byte("" + "test1 test2 test3\n" + "test4 test6\n" + "test7 test8 test9\n") - os.WriteFile(tmp_file, in_data, 0o644) + 
os.WriteFile(tmpFile, inData, 0o644) - FileReplaceToken(tmp_file, + FileReplaceToken(tmpFile, false, false, false, "", "test5", ) - body, err := os.ReadFile(tmp_file) + body, err := os.ReadFile(tmpFile) - if err != nil || !bytes.Equal(body, out_data) { - t.Fatalf(`FileReplaceToken("", "test5") = %q, %v, want: %q, error`, body, err, out_data) + if err != nil || !bytes.Equal(body, outData) { + t.Fatalf(`FileReplaceToken("", "test5") = %q, %v, want: %q, error`, body, err, outData) } } func Test_file_replace_token_simple_skip_uppercase_token(t *testing.T) { - tmp_file := path.Join(t.TempDir(), "test.txt") + tmpFile := path.Join(t.TempDir(), "test.txt") - in_data := []byte("" + + inData := []byte("" + "test1 test2 test3\n" + "test4 test6\n" + "test7 test8 test9\n") - out_data := []byte("" + + outData := []byte("" + "test1 test2 test3\n" + "test4 test6\n" + "test7 test8 test9\n") - os.WriteFile(tmp_file, in_data, 0o644) + os.WriteFile(tmpFile, inData, 0o644) - FileReplaceToken(tmp_file, + FileReplaceToken(tmpFile, false, false, false, "", "test5", ) - body, err := os.ReadFile(tmp_file) + body, err := os.ReadFile(tmpFile) - if err != nil || !bytes.Equal(body, out_data) { - t.Fatalf(`FileReplaceToken("", "test5") = %q, %v, want: %q, error`, body, err, out_data) + if err != nil || !bytes.Equal(body, outData) { + t.Fatalf(`FileReplaceToken("", "test5") = %q, %v, want: %q, error`, body, err, outData) } } func Test_file_replace_token_anycase_token_proceed(t *testing.T) { - tmp_file := path.Join(t.TempDir(), "test.txt") + tmpFile := path.Join(t.TempDir(), "test.txt") - in_data := []byte("" + + inData := []byte("" + "test1 test2 test3\n" + "test4 test6\n" + "test7 test8 test9\n") - out_data := []byte("" + + outData := []byte("" + "test1 test2 test3\n" + "test4 test5 test6\n" + "test7 test8 test9\n") - os.WriteFile(tmp_file, in_data, 0o644) + os.WriteFile(tmpFile, inData, 0o644) - FileReplaceToken(tmp_file, + FileReplaceToken(tmpFile, false, false, true, "", "test5", ) - body, 
err := os.ReadFile(tmp_file) + body, err := os.ReadFile(tmpFile) - if err != nil || !bytes.Equal(body, out_data) { - t.Fatalf(`FileReplaceToken("", "test5") = %q, %v, want: %q, error`, body, err, out_data) + if err != nil || !bytes.Equal(body, outData) { + t.Fatalf(`FileReplaceToken("", "test5") = %q, %v, want: %q, error`, body, err, outData) } } func Test_file_replace_token_anycase_src_proceed(t *testing.T) { - tmp_file := path.Join(t.TempDir(), "test.txt") + tmpFile := path.Join(t.TempDir(), "test.txt") - in_data := []byte("" + + inData := []byte("" + "test1 test2 test3\n" + "test4 test6\n" + "test7 test8 test9\n") - out_data := []byte("" + + outData := []byte("" + "test1 test2 test3\n" + "test4 test5 test6\n" + "test7 test8 test9\n") - os.WriteFile(tmp_file, in_data, 0o644) + os.WriteFile(tmpFile, inData, 0o644) - FileReplaceToken(tmp_file, + FileReplaceToken(tmpFile, false, false, true, "", "test5", ) - body, err := os.ReadFile(tmp_file) + body, err := os.ReadFile(tmpFile) - if err != nil || !bytes.Equal(body, out_data) { - t.Fatalf(`FileReplaceToken("", "test5") = %q, %v, want: %q, error`, body, err, out_data) + if err != nil || !bytes.Equal(body, outData) { + t.Fatalf(`FileReplaceToken("", "test5") = %q, %v, want: %q, error`, body, err, outData) } } func Test_file_replace_token_anycase_multiple(t *testing.T) { - tmp_file := path.Join(t.TempDir(), "test.txt") + tmpFile := path.Join(t.TempDir(), "test.txt") - in_data := []byte("" + + inData := []byte("" + "test1 test2 test3\n" + "test4 test6\n" + "test7 test8 test9\n") - out_data := []byte("" + + outData := []byte("" + "test1 test2 test3\n" + "test4 test5 test5 test6\n" + "test7 test8 test9\n") - os.WriteFile(tmp_file, in_data, 0o644) + os.WriteFile(tmpFile, inData, 0o644) - FileReplaceToken(tmp_file, + FileReplaceToken(tmpFile, false, false, true, "", "test5", ) - body, err := os.ReadFile(tmp_file) + body, err := os.ReadFile(tmpFile) - if err != nil || !bytes.Equal(body, out_data) { - 
t.Fatalf(`FileReplaceToken("", "test5") = %q, %v, want: %q, error`, body, err, out_data) + if err != nil || !bytes.Equal(body, outData) { + t.Fatalf(`FileReplaceToken("", "test5") = %q, %v, want: %q, error`, body, err, outData) } } func Test_file_replace_token_add(t *testing.T) { - tmp_file := path.Join(t.TempDir(), "test.txt") + tmpFile := path.Join(t.TempDir(), "test.txt") - in_data := []byte("" + + inData := []byte("" + "test1 test2 test3\n" + "test4 test5 test6\n") - out_data := []byte("" + + outData := []byte("" + "test1 test2 test3\n" + "test4 test5 test6\n" + "test5\n") - os.WriteFile(tmp_file, in_data, 0o644) + os.WriteFile(tmpFile, inData, 0o644) - FileReplaceToken(tmp_file, + FileReplaceToken(tmpFile, false, true, false, "", "test5", ) - body, err := os.ReadFile(tmp_file) + body, err := os.ReadFile(tmpFile) - if err != nil || !bytes.Equal(body, out_data) { - t.Fatalf(`FileReplaceToken("", "test5") = %q, %v, want: %q, error`, body, err, out_data) + if err != nil || !bytes.Equal(body, outData) { + t.Fatalf(`FileReplaceToken("", "test5") = %q, %v, want: %q, error`, body, err, outData) } } func Test_file_replace_token_do_not_add_if_replaced(t *testing.T) { - tmp_file := path.Join(t.TempDir(), "test.txt") + tmpFile := path.Join(t.TempDir(), "test.txt") - in_data := []byte("" + + inData := []byte("" + "test1 test2 test3\n" + "test4 test6\n" + "test7 test8 test9\n") - out_data := []byte("" + + outData := []byte("" + "test1 test2 test3\n" + "test4 test5 test6\n" + "test7 test8 test9\n") - os.WriteFile(tmp_file, in_data, 0o644) + os.WriteFile(tmpFile, inData, 0o644) - FileReplaceToken(tmp_file, + FileReplaceToken(tmpFile, false, true, false, "", "test5", ) - body, err := os.ReadFile(tmp_file) + body, err := os.ReadFile(tmpFile) - if err != nil || !bytes.Equal(body, out_data) { - t.Fatalf(`FileReplaceToken("", "test5") = %q, %v, want: %q, error`, body, err, out_data) + if err != nil || !bytes.Equal(body, outData) { + t.Fatalf(`FileReplaceToken("", "test5") = %q, 
%v, want: %q, error`, body, err, outData) } } func Test_file_replace_token_full_line(t *testing.T) { - tmp_file := path.Join(t.TempDir(), "test.txt") + tmpFile := path.Join(t.TempDir(), "test.txt") - in_data := []byte("" + + inData := []byte("" + "test1 test2 test3\n" + "test4 test6\n" + "test7 test8 test9\n") - out_data := []byte("" + + outData := []byte("" + "test1 test2 test3\n" + "test5\n" + "test7 test8 test9\n") - os.WriteFile(tmp_file, in_data, 0o644) + os.WriteFile(tmpFile, inData, 0o644) - FileReplaceToken(tmp_file, + FileReplaceToken(tmpFile, true, false, false, "", "test5", ) - body, err := os.ReadFile(tmp_file) + body, err := os.ReadFile(tmpFile) - if err != nil || !bytes.Equal(body, out_data) { - t.Fatalf(`FileReplaceToken("", "test5") = %q, %v, want: %q, error`, body, err, out_data) + if err != nil || !bytes.Equal(body, outData) { + t.Fatalf(`FileReplaceToken("", "test5") = %q, %v, want: %q, error`, body, err, outData) } } diff --git a/lib/util/file_starts_with.go b/lib/util/file_starts_with.go index 437c18c..878af3a 100644 --- a/lib/util/file_starts_with.go +++ b/lib/util/file_starts_with.go @@ -26,19 +26,19 @@ var ( func FileStartsWith(path string, prefix []byte) error { // Open input file - in_f, err := os.OpenFile(path, os.O_RDONLY, 0o644) + inF, err := os.OpenFile(path, os.O_RDONLY, 0o644) if err != nil { return err } - defer in_f.Close() + defer inF.Close() // Check it's not a dir - if info, err := in_f.Stat(); err == nil && info.IsDir() { + if info, err := inF.Stat(); err == nil && info.IsDir() { return ErrFileStartsWithDirectory } buf := make([]byte, len(prefix)) - length, err := in_f.Read(buf) + length, err := inF.Read(buf) if err != nil { return err } diff --git a/lib/util/file_starts_with_test.go b/lib/util/file_starts_with_test.go index 80cde10..3d6e4ec 100644 --- a/lib/util/file_starts_with_test.go +++ b/lib/util/file_starts_with_test.go @@ -19,49 +19,49 @@ import ( ) func TestFileStartsWithGood(t *testing.T) { - tmp_file := 
path.Join(t.TempDir(), "test.txt") + tmpFile := path.Join(t.TempDir(), "test.txt") - in_data := []byte("" + + inData := []byte("" + "test1 test2 test3\n" + "test4 test5 test6\n") - os.WriteFile(tmp_file, in_data, 0o644) + os.WriteFile(tmpFile, inData, 0o644) - if err := FileStartsWith(tmp_file, []byte("test1 ")); err != nil { + if err := FileStartsWith(tmpFile, []byte("test1 ")); err != nil { t.Fatalf(`FileStartsWith("test1 ") = %v, want: nil`, err) } } func TestFileStartsNotEqual(t *testing.T) { - tmp_file := path.Join(t.TempDir(), "test.txt") + tmpFile := path.Join(t.TempDir(), "test.txt") - in_data := []byte("" + + inData := []byte("" + "test1 test2 test3\n" + "test4 test5 test6\n") - os.WriteFile(tmp_file, in_data, 0o644) + os.WriteFile(tmpFile, inData, 0o644) - if err := FileStartsWith(tmp_file, []byte("test2 ")); err != ErrFileStartsWithNotEqual { + if err := FileStartsWith(tmpFile, []byte("test2 ")); err != ErrFileStartsWithNotEqual { t.Fatalf(`FileStartsWith("test2 ") = %v, want: %v`, err, ErrFileStartsWithNotEqual) } } func TestFileStartsDirectory(t *testing.T) { - tmp_file := t.TempDir() + tmpFile := t.TempDir() - if err := FileStartsWith(tmp_file, []byte("test2 ")); err != ErrFileStartsWithDirectory { + if err := FileStartsWith(tmpFile, []byte("test2 ")); err != ErrFileStartsWithDirectory { t.Fatalf(`FileStartsWith("test2 ") = %v, want: %v`, err, ErrFileStartsWithDirectory) } } func TestFileStartsSmall(t *testing.T) { - tmp_file := path.Join(t.TempDir(), "test.txt") + tmpFile := path.Join(t.TempDir(), "test.txt") - in_data := []byte("small file\n") + inData := []byte("small file\n") - os.WriteFile(tmp_file, in_data, 0o644) + os.WriteFile(tmpFile, inData, 0o644) - if err := FileStartsWith(tmp_file, []byte("biiiiiiiiiig prefix")); err != ErrFileStartsWithFileTooSmall { + if err := FileStartsWith(tmpFile, []byte("biiiiiiiiiig prefix")); err != ErrFileStartsWithFileTooSmall { t.Fatalf(`FileStartsWith("test2 ") = %v, want: %v`, err, 
ErrFileStartsWithFileTooSmall) } } diff --git a/lib/util/human_size.go b/lib/util/human_size.go index f3b1bd0..e75ce8d 100644 --- a/lib/util/human_size.go +++ b/lib/util/human_size.go @@ -52,13 +52,13 @@ func (hs *HumanSize) UnmarshalText(data []byte) error { // Detecting unit & multiplier var mult HumanSize = 0 var unit string - var unit_len int + var unitLen int if length > 1 { unit = input[length-2:] - unit_len = 2 + unitLen = 2 } else { unit = input - unit_len = length + unitLen = length } switch unit { case "KB": @@ -77,14 +77,14 @@ func (hs *HumanSize) UnmarshalText(data []byte) error { // Could be something incorrect, B or number - so bytes if unit[0] >= '0' && unit[0] <= '9' { // It's byte - if unit_len > 1 { + if unitLen > 1 { if unit[1] == 'B' { - unit_len = 1 + unitLen = 1 } else if unit[1] >= '0' && unit[1] <= '9' { - unit_len = 0 + unitLen = 0 } } else { - unit_len = 0 + unitLen = 0 } mult = B } @@ -94,7 +94,7 @@ func (hs *HumanSize) UnmarshalText(data []byte) error { } // Detecting value - val, err := strconv.ParseUint(input[:length-unit_len], 10, 64) + val, err := strconv.ParseUint(input[:length-unitLen], 10, 64) if err != nil { return fmt.Errorf("Unable to parse provided human size value: %s", input) } diff --git a/lib/util/human_size_test.go b/lib/util/human_size_test.go index 1f11684..aa5ccee 100644 --- a/lib/util/human_size_test.go +++ b/lib/util/human_size_test.go @@ -18,7 +18,7 @@ import ( ) var ( - TEST_HUMAN_SIZE_PARSE_STRING = [][2]string{ + TestHumanSizeParseString = [][2]string{ {`0`, `0B`}, {`0B`, `0B`}, {`0EB`, `0B`}, @@ -42,7 +42,7 @@ var ( // Verify all the inputs will be parsed correctly func Test_human_size_parse_string(t *testing.T) { - for _, testcase := range TEST_HUMAN_SIZE_PARSE_STRING { + for _, testcase := range TestHumanSizeParseString { t.Run(fmt.Sprintf("Testing `%s`", testcase[0]), func(t *testing.T) { out, err := NewHumanSize(testcase[0]) if out.String() != testcase[1] { diff --git a/lib/util/lock.go b/lib/util/lock.go 
index 93a196a..56751c7 100644 --- a/lib/util/lock.go +++ b/lib/util/lock.go @@ -26,53 +26,53 @@ import ( ) // The function creates the lock file, notice - remove it yourself -func CreateLock(lock_path string) error { - lock_file, err := os.Create(lock_path) +func CreateLock(lockPath string) error { + lockFile, err := os.Create(lockPath) if err != nil { - return log.Error("Util: Unable to create the lock file:", lock_path) + return log.Error("Util: Unable to create the lock file:", lockPath) } // Writing pid into the file for additional info data := []byte(fmt.Sprintf("%d", os.Getpid())) - lock_file.Write(data) - lock_file.Close() + lockFile.Write(data) + lockFile.Close() return nil } // Wait for the lock file and clean func will be executed if it's invalid -func WaitLock(lock_path string, clean func()) error { - wait_counter := 0 +func WaitLock(lockPath string, clean func()) error { + waitCounter := 0 for { - if _, err := os.Stat(lock_path); os.IsNotExist(err) { + if _, err := os.Stat(lockPath); os.IsNotExist(err) { break } - if wait_counter%6 == 0 { + if waitCounter%6 == 0 { // Read the lock file to print the pid - if lock_info, err := os.ReadFile(lock_path); err == nil { + if lockInfo, err := os.ReadFile(lockPath); err == nil { // Check the pid is running - because if the app crashes // it can leave the lock file (weak protection but worth it) - pid, err := strconv.ParseInt(strings.SplitN(string(lock_info), " ", 2)[0], 10, bits.UintSize) + pid, err := strconv.ParseInt(strings.SplitN(string(lockInfo), " ", 2)[0], 10, bits.UintSize) if err != nil || pid < 0 || pid > math.MaxInt32 { // No valid pid in the lock file - it's actually a small chance it's create or // write delay, but it's so small I want to ignore it - log.Warnf("Util: Lock file doesn't contain pid of the process '%s': %s - %v", lock_path, lock_info, err) + log.Warnf("Util: Lock file doesn't contain pid of the process '%s': %s - %v", lockPath, lockInfo, err) clean() - os.Remove(lock_path) + 
os.Remove(lockPath) break } if proc, err := os.FindProcess(int(pid)); err != nil || proc.Signal(syscall.Signal(0)) != nil { - log.Warnf("Util: No process running for lock file '%s': %s", lock_path, lock_info) + log.Warnf("Util: No process running for lock file '%s': %s", lockPath, lockInfo) clean() - os.Remove(lock_path) + os.Remove(lockPath) break } - log.Debugf("Util: Waiting for '%s', pid %s", lock_path, lock_info) + log.Debugf("Util: Waiting for '%s', pid %s", lockPath, lockInfo) } } time.Sleep(5 * time.Second) - wait_counter += 1 + waitCounter += 1 } return nil diff --git a/lib/util/passthrough_monitor.go b/lib/util/passthrough_monitor.go index e29bbf3..d5b1164 100644 --- a/lib/util/passthrough_monitor.go +++ b/lib/util/passthrough_monitor.go @@ -30,7 +30,7 @@ type PassThruMonitor struct { total int64 progress float64 - print_ts time.Time + printTs time.Time } // Read 'overrides' the underlying io.Reader's Read method. @@ -41,11 +41,11 @@ func (pt *PassThruMonitor) Read(p []byte) (int, error) { if n > 0 { pt.total += int64(n) percentage := float64(pt.total) / float64(pt.Length) * float64(100) - if percentage-pt.progress > 10 || time.Since(pt.print_ts) > 30*time.Second { + if percentage-pt.progress > 10 || time.Since(pt.printTs) > 30*time.Second { // Show status every 10% or 30 sec log.Infof("%s: %v%% (%dB)", pt.Name, int(percentage), pt.total) pt.progress = percentage - pt.print_ts = time.Now() + pt.printTs = time.Now() } } diff --git a/lib/util/streamlog_monitor.go b/lib/util/streamlog_monitor.go index aee40b2..e4b7345 100644 --- a/lib/util/streamlog_monitor.go +++ b/lib/util/streamlog_monitor.go @@ -30,9 +30,9 @@ type StreamLogMonitor struct { // Read 'overrides' the underlying io.Reader's Read method func (slm *StreamLogMonitor) Write(p []byte) (int, error) { index := 0 - prev_index := 0 + prevIndex := 0 for index < len(p) { - index += bytes.Index(p[prev_index:], LineBreak) + index += bytes.Index(p[prevIndex:], LineBreak) if index == -1 { // The data does 
not contain EOL, so appending to buffer and wait slm.linebuf = append(slm.linebuf, p) @@ -40,11 +40,11 @@ func (slm *StreamLogMonitor) Write(p []byte) (int, error) { } // The newline was found, so prepending the line buffer and print it out // We don't need the EOF in the line (log.Infof adds), so increment index after processing - slm.linebuf = append(slm.linebuf, p[prev_index:index]) + slm.linebuf = append(slm.linebuf, p[prevIndex:index]) log.Info(slm.Prefix + string(bytes.Join(slm.linebuf, EmptyByte))) clear(slm.linebuf) index++ - prev_index = index + prevIndex = index } return len(p), nil diff --git a/lib/util/unparsed_json.go b/lib/util/unparsed_json.go index 14e9944..d5cc273 100644 --- a/lib/util/unparsed_json.go +++ b/lib/util/unparsed_json.go @@ -36,10 +36,10 @@ func (r *UnparsedJson) UnmarshalYAML(node *yaml.Node) error { if err := node.Decode(&value); err != nil { return err } - json_data, err := json.Marshal(value) + jsonData, err := json.Marshal(value) if err != nil { return err } - r.UnmarshalJSON(json_data) + r.UnmarshalJSON(jsonData) return nil } diff --git a/tests/allocate_apps_stress_test.go b/tests/allocate_apps_stress_test.go index 9353365..344433b 100644 --- a/tests/allocate_apps_stress_test.go +++ b/tests/allocate_apps_stress_test.go @@ -85,12 +85,12 @@ drivers: wg := &sync.WaitGroup{} for i := 0; i < 50; i++ { wg.Add(1) - go allocate_apps_stress_worker(t, wg, i, afi, label.UID.String()) + go allocateAppsStressWorker(t, wg, i, afi, label.UID.String()) } wg.Wait() } -func allocate_apps_stress_worker(t *testing.T, wg *sync.WaitGroup, id int, afi *h.AFInstance, label string) { +func allocateAppsStressWorker(t *testing.T, wg *sync.WaitGroup, id int, afi *h.AFInstance, label string) { defer wg.Done() tr := &http.Transport{ diff --git a/tests/allocate_multidefinition_label_test.go b/tests/allocate_multidefinition_label_test.go index a89916d..8915fb1 100644 --- a/tests/allocate_multidefinition_label_test.go +++ 
b/tests/allocate_multidefinition_label_test.go @@ -97,7 +97,7 @@ drivers: } }) - var app_state types.ApplicationState + var appState types.ApplicationState t.Run("Application should get ALLOCATED in 10 sec", func(t *testing.T) { h.Retry(&h.Timer{Timeout: 10 * time.Second, Wait: 1 * time.Second}, t, func(r *h.R) { apitest.New(). @@ -107,10 +107,10 @@ drivers: Expect(r). Status(http.StatusOK). End(). - JSON(&app_state) + JSON(&appState) - if app_state.Status != types.ApplicationStatusALLOCATED { - r.Fatalf("Application Status is incorrect: %v", app_state.Status) + if appState.Status != types.ApplicationStatusALLOCATED { + r.Fatalf("Application Status is incorrect: %v", appState.Status) } }) }) @@ -153,10 +153,10 @@ drivers: Expect(r). Status(http.StatusOK). End(). - JSON(&app_state) + JSON(&appState) - if app_state.Status != types.ApplicationStatusDEALLOCATED { - r.Fatalf("Application Status is incorrect: %v", app_state.Status) + if appState.Status != types.ApplicationStatusDEALLOCATED { + r.Fatalf("Application Status is incorrect: %v", appState.Status) } }) }) @@ -207,10 +207,10 @@ drivers: Expect(r). Status(http.StatusOK). End(). - JSON(&app_state) + JSON(&appState) - if app_state.Status != types.ApplicationStatusALLOCATED { - r.Fatalf("Application Status is incorrect: %v", app_state.Status) + if appState.Status != types.ApplicationStatusALLOCATED { + r.Fatalf("Application Status is incorrect: %v", appState.Status) } }) }) @@ -252,10 +252,10 @@ drivers: Expect(r). Status(http.StatusOK). End(). - JSON(&app_state) + JSON(&appState) - if app_state.Status != types.ApplicationStatusDEALLOCATED { - r.Fatalf("Application Status is incorrect: %v", app_state.Status) + if appState.Status != types.ApplicationStatusDEALLOCATED { + r.Fatalf("Application Status is incorrect: %v", appState.Status) } }) }) @@ -307,10 +307,10 @@ drivers: Expect(r). Status(http.StatusOK). End(). 
- JSON(&app_state) + JSON(&appState) - if app_state.Status != types.ApplicationStatusALLOCATED { - r.Fatalf("Application Status is incorrect: %v", app_state.Status) + if appState.Status != types.ApplicationStatusALLOCATED { + r.Fatalf("Application Status is incorrect: %v", appState.Status) } }) }) @@ -352,10 +352,10 @@ drivers: Expect(r). Status(http.StatusOK). End(). - JSON(&app_state) + JSON(&appState) - if app_state.Status != types.ApplicationStatusDEALLOCATED { - r.Fatalf("Application Status is incorrect: %v", app_state.Status) + if appState.Status != types.ApplicationStatusDEALLOCATED { + r.Fatalf("Application Status is incorrect: %v", appState.Status) } }) }) diff --git a/tests/application_task_notexisting_fail_test.go b/tests/application_task_notexisting_fail_test.go index 434b177..8c67226 100644 --- a/tests/application_task_notexisting_fail_test.go +++ b/tests/application_task_notexisting_fail_test.go @@ -89,7 +89,7 @@ drivers: } }) - var app_state types.ApplicationState + var appState types.ApplicationState t.Run("Application should get ALLOCATED in 10 sec", func(t *testing.T) { h.Retry(&h.Timer{Timeout: 10 * time.Second, Wait: 1 * time.Second}, t, func(r *h.R) { apitest.New(). @@ -99,15 +99,15 @@ drivers: Expect(r). Status(http.StatusOK). End(). - JSON(&app_state) + JSON(&appState) - if app_state.Status != types.ApplicationStatusALLOCATED { - r.Fatalf("Application Status is incorrect: %v", app_state.Status) + if appState.Status != types.ApplicationStatusALLOCATED { + r.Fatalf("Application Status is incorrect: %v", appState.Status) } }) }) - var app_task types.ApplicationTask + var appTask types.ApplicationTask t.Run("Create ApplicationTask Snapshot", func(t *testing.T) { apitest.New(). EnableNetworking(cli). @@ -117,14 +117,14 @@ drivers: Expect(t). Status(http.StatusOK). End(). 
- JSON(&app_task) + JSON(&appTask) - if app_task.UID == uuid.Nil { - t.Fatalf("ApplicationTask UID is incorrect: %v", app_task.UID) + if appTask.UID == uuid.Nil { + t.Fatalf("ApplicationTask UID is incorrect: %v", appTask.UID) } }) - var app_tasks []types.ApplicationTask + var appTasks []types.ApplicationTask t.Run("ApplicationTask should be executed as not found in 10 sec", func(t *testing.T) { h.Retry(&h.Timer{Timeout: 10 * time.Second, Wait: 1 * time.Second}, t, func(r *h.R) { apitest.New(). @@ -134,16 +134,16 @@ drivers: Expect(r). Status(http.StatusOK). End(). - JSON(&app_tasks) + JSON(&appTasks) - if len(app_tasks) != 1 { + if len(appTasks) != 1 { r.Fatalf("Application Tasks list is empty") } - if app_tasks[0].UID != app_task.UID { - r.Fatalf("ApplicationTask UID is incorrect: %v != %v", app_tasks[0].UID, app_task.UID) + if appTasks[0].UID != appTask.UID { + r.Fatalf("ApplicationTask UID is incorrect: %v != %v", appTasks[0].UID, appTask.UID) } - if string(app_tasks[0].Result) != `{"error":"task not availble in driver"}` { - r.Fatalf("ApplicationTask result is incorrect: %v", app_tasks[0].Result) + if string(appTasks[0].Result) != `{"error":"task not availble in driver"}` { + r.Fatalf("ApplicationTask result is incorrect: %v", appTasks[0].Result) } }) }) @@ -167,10 +167,10 @@ drivers: Expect(r). Status(http.StatusOK). End(). 
- JSON(&app_state) + JSON(&appState) - if app_state.Status != types.ApplicationStatusDEALLOCATED { - r.Fatalf("Application Status is incorrect: %v", app_state.Status) + if appState.Status != types.ApplicationStatusDEALLOCATED { + r.Fatalf("Application Status is incorrect: %v", appState.Status) } }) }) diff --git a/tests/application_task_snapshot_by_user_test.go b/tests/application_task_snapshot_by_user_test.go index 9bd0636..d292629 100644 --- a/tests/application_task_snapshot_by_user_test.go +++ b/tests/application_task_snapshot_by_user_test.go @@ -105,7 +105,7 @@ drivers: } }) - var app_state types.ApplicationState + var appState types.ApplicationState t.Run("Application should get ALLOCATED in 10 sec", func(t *testing.T) { h.Retry(&h.Timer{Timeout: 10 * time.Second, Wait: 1 * time.Second}, t, func(r *h.R) { apitest.New(). @@ -115,10 +115,10 @@ drivers: Expect(r). Status(http.StatusOK). End(). - JSON(&app_state) + JSON(&appState) - if app_state.Status != types.ApplicationStatusALLOCATED { - r.Fatalf("Application Status is incorrect: %v", app_state.Status) + if appState.Status != types.ApplicationStatusALLOCATED { + r.Fatalf("Application Status is incorrect: %v", appState.Status) } }) }) @@ -139,7 +139,7 @@ drivers: } }) - var app_task1 types.ApplicationTask + var appTask1 types.ApplicationTask t.Run("Create ApplicationTask 1 Snapshot on ALLOCATE", func(t *testing.T) { apitest.New(). EnableNetworking(cli). @@ -149,14 +149,14 @@ drivers: Expect(t). Status(http.StatusOK). End(). - JSON(&app_task1) + JSON(&appTask1) - if app_task1.UID == uuid.Nil { - t.Fatalf("ApplicationTask 1 UID is incorrect: %v", app_task1.UID) + if appTask1.UID == uuid.Nil { + t.Fatalf("ApplicationTask 1 UID is incorrect: %v", appTask1.UID) } }) - var app_task2 types.ApplicationTask + var appTask2 types.ApplicationTask t.Run("Create ApplicationTask 2 Snapshot on DEALLOCATE", func(t *testing.T) { apitest.New(). EnableNetworking(cli). @@ -166,14 +166,14 @@ drivers: Expect(t). 
Status(http.StatusOK). End(). - JSON(&app_task2) + JSON(&appTask2) - if app_task2.UID == uuid.Nil { - t.Fatalf("ApplicationTask 2 UID is incorrect: %v", app_task2.UID) + if appTask2.UID == uuid.Nil { + t.Fatalf("ApplicationTask 2 UID is incorrect: %v", appTask2.UID) } }) - var app_tasks []types.ApplicationTask + var appTasks []types.ApplicationTask t.Run("ApplicationTask 1 should be executed in 10 sec and 2 should not be executed", func(t *testing.T) { h.Retry(&h.Timer{Timeout: 10 * time.Second, Wait: 1 * time.Second}, t, func(r *h.R) { apitest.New(). @@ -183,22 +183,22 @@ drivers: Expect(r). Status(http.StatusOK). End(). - JSON(&app_tasks) + JSON(&appTasks) - if len(app_tasks) != 2 { + if len(appTasks) != 2 { r.Fatalf("Application Tasks list does not contain 2 tasks") } - if app_tasks[0].UID != app_task1.UID { - r.Fatalf("ApplicationTask 1 UID is incorrect: %v != %v", app_tasks[0].UID, app_task1.UID) + if appTasks[0].UID != appTask1.UID { + r.Fatalf("ApplicationTask 1 UID is incorrect: %v != %v", appTasks[0].UID, appTask1.UID) } - if app_tasks[1].UID != app_task2.UID { - r.Fatalf("ApplicationTask 2 UID is incorrect: %v != %v", app_tasks[1].UID, app_task2.UID) + if appTasks[1].UID != appTask2.UID { + r.Fatalf("ApplicationTask 2 UID is incorrect: %v != %v", appTasks[1].UID, appTask2.UID) } - if string(app_tasks[0].Result) != `{"snapshots":["test-snapshot"],"when":"ALLOCATED"}` { - r.Fatalf("ApplicationTask 1 result is incorrect: %v", app_tasks[0].Result) + if string(appTasks[0].Result) != `{"snapshots":["test-snapshot"],"when":"ALLOCATED"}` { + r.Fatalf("ApplicationTask 1 result is incorrect: %v", appTasks[0].Result) } - if string(app_tasks[1].Result) != `{}` { - r.Fatalf("ApplicationTask 2 result is incorrect: %v", app_tasks[1].Result) + if string(appTasks[1].Result) != `{}` { + r.Fatalf("ApplicationTask 2 result is incorrect: %v", appTasks[1].Result) } }) }) @@ -222,16 +222,16 @@ drivers: Expect(r). Status(http.StatusOK). End(). 
- JSON(&app_tasks) + JSON(&appTasks) - if len(app_tasks) != 2 { + if len(appTasks) != 2 { r.Fatalf("Application Tasks list does not contain 2 tasks") } - if app_tasks[1].UID != app_task2.UID { - r.Fatalf("ApplicationTask 2 UID is incorrect: %v != %v", app_tasks[1].UID, app_task2.UID) + if appTasks[1].UID != appTask2.UID { + r.Fatalf("ApplicationTask 2 UID is incorrect: %v != %v", appTasks[1].UID, appTask2.UID) } - if string(app_tasks[1].Result) != `{"snapshots":["test-snapshot"],"when":"DEALLOCATE"}` { - r.Fatalf("ApplicationTask 2 result is incorrect: %v", app_tasks[1].Result) + if string(appTasks[1].Result) != `{"snapshots":["test-snapshot"],"when":"DEALLOCATE"}` { + r.Fatalf("ApplicationTask 2 result is incorrect: %v", appTasks[1].Result) } }) }) @@ -245,10 +245,10 @@ drivers: Expect(r). Status(http.StatusOK). End(). - JSON(&app_state) + JSON(&appState) - if app_state.Status != types.ApplicationStatusDEALLOCATED { - r.Fatalf("Application Status is incorrect: %v", app_state.Status) + if appState.Status != types.ApplicationStatusDEALLOCATED { + r.Fatalf("Application Status is incorrect: %v", appState.Status) } }) }) diff --git a/tests/cant_allocate_too_big_label_test.go b/tests/cant_allocate_too_big_label_test.go index 8f50536..8803f67 100644 --- a/tests/cant_allocate_too_big_label_test.go +++ b/tests/cant_allocate_too_big_label_test.go @@ -98,7 +98,7 @@ drivers: time.Sleep(10 * time.Second) - var app_state types.ApplicationState + var appState types.ApplicationState t.Run("Application should have state NEW in 10 sec", func(t *testing.T) { apitest.New(). EnableNetworking(cli). @@ -107,10 +107,10 @@ drivers: Expect(t). Status(http.StatusOK). End(). 
- JSON(&app_state) + JSON(&appState) - if app_state.Status != types.ApplicationStatusNEW { - t.Fatalf("Application Status is incorrect: %v", app_state.Status) + if appState.Status != types.ApplicationStatusNEW { + t.Fatalf("Application Status is incorrect: %v", appState.Status) } }) @@ -124,10 +124,10 @@ drivers: Expect(t). Status(http.StatusOK). End(). - JSON(&app_state) + JSON(&appState) - if app_state.Status != types.ApplicationStatusNEW { - t.Fatalf("Application Status is incorrect: %v", app_state.Status) + if appState.Status != types.ApplicationStatusNEW { + t.Fatalf("Application Status is incorrect: %v", appState.Status) } }) @@ -141,10 +141,10 @@ drivers: Expect(t). Status(http.StatusOK). End(). - JSON(&app_state) + JSON(&appState) - if app_state.Status != types.ApplicationStatusNEW { - t.Fatalf("Application Status is incorrect: %v", app_state.Status) + if appState.Status != types.ApplicationStatusNEW { + t.Fatalf("Application Status is incorrect: %v", appState.Status) } }) @@ -158,10 +158,10 @@ drivers: Expect(t). Status(http.StatusOK). End(). - JSON(&app_state) + JSON(&appState) - if app_state.Status != types.ApplicationStatusNEW { - t.Fatalf("Application Status is incorrect: %v", app_state.Status) + if appState.Status != types.ApplicationStatusNEW { + t.Fatalf("Application Status is incorrect: %v", appState.Status) } }) @@ -184,10 +184,10 @@ drivers: Expect(r). Status(http.StatusOK). End(). 
- JSON(&app_state) + JSON(&appState) - if app_state.Status != types.ApplicationStatusRECALLED { - r.Fatalf("Application Status is incorrect: %v", app_state.Status) + if appState.Status != types.ApplicationStatusRECALLED { + r.Fatalf("Application Status is incorrect: %v", appState.Status) } }) }) diff --git a/tests/default_lifetime_timeout_test.go b/tests/default_lifetime_timeout_test.go index 42aa60a..4e39404 100644 --- a/tests/default_lifetime_timeout_test.go +++ b/tests/default_lifetime_timeout_test.go @@ -92,7 +92,7 @@ drivers: } }) - var app_state types.ApplicationState + var appState types.ApplicationState t.Run("Application should get ALLOCATED in 10 sec", func(t *testing.T) { h.Retry(&h.Timer{Timeout: 10 * time.Second, Wait: 1 * time.Second}, t, func(r *h.R) { apitest.New(). @@ -102,10 +102,10 @@ drivers: Expect(r). Status(http.StatusOK). End(). - JSON(&app_state) + JSON(&appState) - if app_state.Status != types.ApplicationStatusALLOCATED { - r.Fatalf("Application Status is incorrect: %v", app_state.Status) + if appState.Status != types.ApplicationStatusALLOCATED { + r.Fatalf("Application Status is incorrect: %v", appState.Status) } }) }) @@ -120,10 +120,10 @@ drivers: Expect(t). Status(http.StatusOK). End(). - JSON(&app_state) + JSON(&appState) - if app_state.Status != types.ApplicationStatusALLOCATED { - t.Fatalf("Application Status is incorrect: %v", app_state.Status) + if appState.Status != types.ApplicationStatusALLOCATED { + t.Fatalf("Application Status is incorrect: %v", appState.Status) } }) @@ -136,10 +136,10 @@ drivers: Expect(r). Status(http.StatusOK). End(). 
- JSON(&app_state) + JSON(&appState) - if app_state.Status != types.ApplicationStatusDEALLOCATED { - r.Fatalf("Application Status is incorrect: %v", app_state.Status) + if appState.Status != types.ApplicationStatusDEALLOCATED { + r.Fatalf("Application Status is incorrect: %v", appState.Status) } }) }) diff --git a/tests/generated_uids_prefix_is_node_prefix_test.go b/tests/generated_uids_prefix_is_node_prefix_test.go index 9aa3c3e..6a95c75 100644 --- a/tests/generated_uids_prefix_is_node_prefix_test.go +++ b/tests/generated_uids_prefix_is_node_prefix_test.go @@ -124,7 +124,7 @@ drivers: } }) - var app_state types.ApplicationState + var appState types.ApplicationState t.Run("Application should get ALLOCATED in 10 sec", func(t *testing.T) { h.Retry(&h.Timer{Timeout: 10 * time.Second, Wait: 1 * time.Second}, t, func(r *h.R) { apitest.New(). @@ -134,18 +134,18 @@ drivers: Expect(r). Status(http.StatusOK). End(). - JSON(&app_state) + JSON(&appState) - if app_state.UID == uuid.Nil { - t.Fatalf("ApplicationState UID is incorrect: %v", app_state.UID) + if appState.UID == uuid.Nil { + t.Fatalf("ApplicationState UID is incorrect: %v", appState.UID) } - if app_state.Status != types.ApplicationStatusALLOCATED { - r.Fatalf("Application Status is incorrect: %v", app_state.Status) + if appState.Status != types.ApplicationStatusALLOCATED { + r.Fatalf("Application Status is incorrect: %v", appState.Status) } - if !bytes.Equal(app_state.UID[:6], node.UID[:6]) { - t.Fatalf("ApplicationState UID prefix != Node UID prefix: %v, %v", app_state.UID, node.UID) + if !bytes.Equal(appState.UID[:6], node.UID[:6]) { + t.Fatalf("ApplicationState UID prefix != Node UID prefix: %v, %v", appState.UID, node.UID) } }) }) @@ -193,10 +193,10 @@ drivers: Expect(r). Status(http.StatusOK). End(). 
- JSON(&app_state) + JSON(&appState) - if app_state.Status != types.ApplicationStatusDEALLOCATED { - r.Fatalf("Application Status is incorrect: %v", app_state.Status) + if appState.Status != types.ApplicationStatusDEALLOCATED { + r.Fatalf("Application Status is incorrect: %v", appState.Status) } }) }) diff --git a/tests/helper/fish.go b/tests/helper/fish.go index cb93786..3b2a35d 100644 --- a/tests/helper/fish.go +++ b/tests/helper/fish.go @@ -24,7 +24,7 @@ import ( "time" ) -var fish_path = os.Getenv("FISH_PATH") // Full path to the aquarium-fish binary +var fishPath = os.Getenv("FISH_PATH") // Full path to the aquarium-fish binary // Saves state of the running Aquarium Fish for particular test type AFInstance struct { @@ -33,9 +33,9 @@ type AFInstance struct { running bool cmd *exec.Cmd - node_name string - endpoint string - admin_token string + nodeName string + endpoint string + adminToken string } // Simple creates and run the fish node @@ -52,13 +52,13 @@ func NewAfInstance(tb testing.TB, name, cfg string) *AFInstance { tb.Helper() tb.Log("INFO: Creating new node:", name) afi := &AFInstance{ - node_name: name, + nodeName: name, } afi.workspace = tb.TempDir() - tb.Log("INFO: Created workspace:", afi.node_name, afi.workspace) + tb.Log("INFO: Created workspace:", afi.nodeName, afi.workspace) - cfg += fmt.Sprintf("\nnode_name: %q", afi.node_name) + cfg += fmt.Sprintf("\nnode_name: %q", afi.nodeName) os.WriteFile(filepath.Join(afi.workspace, "config.yml"), []byte(cfg), 0o600) tb.Log("INFO: Stored config:", cfg) @@ -78,7 +78,7 @@ func (afi1 *AFInstance) NewClusterNode(tb testing.TB, name, cfg string, args ... 
// Just create the node based on the existing cluster node func (afi1 *AFInstance) NewAfInstanceCluster(tb testing.TB, name, cfg string) *AFInstance { tb.Helper() - tb.Log("INFO: Creating new cluster node with seed node:", afi1.node_name) + tb.Log("INFO: Creating new cluster node with seed node:", afi1.nodeName) cfg += fmt.Sprintf("\ncluster_join: [%q]", afi1.endpoint) afi2 := NewAfInstance(tb, name, cfg) @@ -110,7 +110,7 @@ func (afi *AFInstance) Workspace() string { // Returns admin token func (afi *AFInstance) AdminToken() string { - return afi.admin_token + return afi.adminToken } // Check the fish instance is running @@ -121,7 +121,7 @@ func (afi *AFInstance) IsRunning() bool { // Restart the application func (afi *AFInstance) Restart(tb testing.TB, args ...string) { tb.Helper() - tb.Log("INFO: Restarting:", afi.node_name, afi.workspace) + tb.Log("INFO: Restarting:", afi.nodeName, afi.workspace) afi.Stop(tb) afi.Start(tb, args...) } @@ -129,7 +129,7 @@ func (afi *AFInstance) Restart(tb testing.TB, args ...string) { // Cleanup after the test execution func (afi *AFInstance) Cleanup(tb testing.TB) { tb.Helper() - tb.Log("INFO: Cleaning up:", afi.node_name, afi.workspace) + tb.Log("INFO: Cleaning up:", afi.nodeName, afi.workspace) afi.Stop(tb) os.RemoveAll(afi.workspace) } @@ -144,7 +144,7 @@ func (afi *AFInstance) Stop(tb testing.TB) { afi.cmd.Process.Signal(os.Interrupt) // Wait 10 seconds for process to stop - tb.Log("INFO: Wait 10s for fish node to stop:", afi.node_name, afi.workspace) + tb.Log("INFO: Wait 10s for fish node to stop:", afi.nodeName, afi.workspace) for i := 1; i < 20; i++ { if !afi.running { return @@ -160,39 +160,39 @@ func (afi *AFInstance) Stop(tb testing.TB) { func (afi *AFInstance) Start(tb testing.TB, args ...string) { tb.Helper() if afi.running { - tb.Fatalf("ERROR: Fish node %q can't be started since already started", afi.node_name) + tb.Fatalf("ERROR: Fish node %q can't be started since already started", afi.nodeName) return } ctx, 
cancel := context.WithCancel(context.Background()) afi.fishKill = cancel - cmd_args := []string{"-v", "debug", "-c", filepath.Join(afi.workspace, "config.yml")} - cmd_args = append(cmd_args, args...) - afi.cmd = exec.CommandContext(ctx, fish_path, cmd_args...) + cmdArgs := []string{"-v", "debug", "-c", filepath.Join(afi.workspace, "config.yml")} + cmdArgs = append(cmdArgs, args...) + afi.cmd = exec.CommandContext(ctx, fishPath, cmdArgs...) afi.cmd.Dir = afi.workspace r, _ := afi.cmd.StdoutPipe() afi.cmd.Stderr = afi.cmd.Stdout - init_done := make(chan string) + initDone := make(chan string) scanner := bufio.NewScanner(r) // TODO: Add timeout for waiting of API available go func() { // Listening for log and scan for token and address for scanner.Scan() { line := scanner.Text() - tb.Log(afi.node_name, line) + tb.Log(afi.nodeName, line) if strings.HasPrefix(line, "Admin user pass: ") { val := strings.SplitN(strings.TrimSpace(line), "Admin user pass: ", 2) if len(val) < 2 { - init_done <- "ERROR: No token after 'Admin user pass: '" + initDone <- "ERROR: No token after 'Admin user pass: '" break } - afi.admin_token = val[1] + afi.adminToken = val[1] } if strings.Contains(line, "API listening on: ") { val := strings.SplitN(strings.TrimSpace(line), "API listening on: ", 2) if len(val) < 2 { - init_done <- "ERROR: No address after 'API listening on: '" + initDone <- "ERROR: No address after 'API listening on: '" break } afi.endpoint = val[1] @@ -200,7 +200,7 @@ func (afi *AFInstance) Start(tb testing.TB, args ...string) { if strings.HasSuffix(line, "Fish initialized") { // Found the needed values and continue to process to print the fish output for // test debugging purposes - init_done <- "" + initDone <- "" } } tb.Log("INFO: Reading of AquariumFish output is done") @@ -216,13 +216,13 @@ func (afi *AFInstance) Start(tb testing.TB, args ...string) { }() if err := afi.cmd.Wait(); err != nil { tb.Log("WARN: AquariumFish process was stopped:", err) - init_done <- 
fmt.Sprintf("ERROR: Fish was stopped with exit code: %v", err) + initDone <- fmt.Sprintf("ERROR: Fish was stopped with exit code: %v", err) } }() - failed := <-init_done + failed := <-initDone if failed != "" { - tb.Fatalf("ERROR: Failed to init node %q: %s", afi.node_name, failed) + tb.Fatalf("ERROR: Failed to init node %q: %s", afi.nodeName, failed) } } diff --git a/tests/helper/t_mock.go b/tests/helper/t_mock.go index d7b13b7..18e1525 100644 --- a/tests/helper/t_mock.go +++ b/tests/helper/t_mock.go @@ -53,16 +53,16 @@ func (m *MockT) Fatalf(format string, args ...any) { func ExpectFailure(t *testing.T, f func(tt testing.TB)) { t.Helper() var wg sync.WaitGroup - mock_t := &MockT{t: t} + mockT := &MockT{t: t} wg.Add(1) go func() { defer wg.Done() - f(mock_t) + f(mockT) }() wg.Wait() - if !mock_t.FailNowCalled { + if !mockT.FailNowCalled { t.Fatalf("ExpectFailure: the function did not fail as expected") } } diff --git a/tests/label_lifetime_timeout_test.go b/tests/label_lifetime_timeout_test.go index ac28315..fe1788d 100644 --- a/tests/label_lifetime_timeout_test.go +++ b/tests/label_lifetime_timeout_test.go @@ -91,7 +91,7 @@ drivers: } }) - var app_state types.ApplicationState + var appState types.ApplicationState t.Run("Application should get ALLOCATED in 10 sec", func(t *testing.T) { h.Retry(&h.Timer{Timeout: 10 * time.Second, Wait: 1 * time.Second}, t, func(r *h.R) { apitest.New(). @@ -101,10 +101,10 @@ drivers: Expect(r). Status(http.StatusOK). End(). - JSON(&app_state) + JSON(&appState) - if app_state.Status != types.ApplicationStatusALLOCATED { - r.Fatalf("Application Status is incorrect: %v", app_state.Status) + if appState.Status != types.ApplicationStatusALLOCATED { + r.Fatalf("Application Status is incorrect: %v", appState.Status) } }) }) @@ -119,10 +119,10 @@ drivers: Expect(t). Status(http.StatusOK). End(). 
- JSON(&app_state) + JSON(&appState) - if app_state.Status != types.ApplicationStatusALLOCATED { - t.Fatalf("Application Status is incorrect: %v", app_state.Status) + if appState.Status != types.ApplicationStatusALLOCATED { + t.Fatalf("Application Status is incorrect: %v", appState.Status) } }) @@ -135,10 +135,10 @@ drivers: Expect(r). Status(http.StatusOK). End(). - JSON(&app_state) + JSON(&appState) - if app_state.Status != types.ApplicationStatusDEALLOCATED { - r.Fatalf("Application Status is incorrect: %v", app_state.Status) + if appState.Status != types.ApplicationStatusDEALLOCATED { + r.Fatalf("Application Status is incorrect: %v", appState.Status) } }) }) diff --git a/tests/label_overrides_default_lifetime_timeout_test.go b/tests/label_overrides_default_lifetime_timeout_test.go index a5cd8b0..538ad4d 100644 --- a/tests/label_overrides_default_lifetime_timeout_test.go +++ b/tests/label_overrides_default_lifetime_timeout_test.go @@ -92,7 +92,7 @@ drivers: } }) - var app_state types.ApplicationState + var appState types.ApplicationState t.Run("Application should get ALLOCATED in 10 sec", func(t *testing.T) { h.Retry(&h.Timer{Timeout: 10 * time.Second, Wait: 1 * time.Second}, t, func(r *h.R) { apitest.New(). @@ -102,10 +102,10 @@ drivers: Expect(r). Status(http.StatusOK). End(). - JSON(&app_state) + JSON(&appState) - if app_state.Status != types.ApplicationStatusALLOCATED { - r.Fatalf("Application Status is incorrect: %v", app_state.Status) + if appState.Status != types.ApplicationStatusALLOCATED { + r.Fatalf("Application Status is incorrect: %v", appState.Status) } }) }) @@ -120,10 +120,10 @@ drivers: Expect(t). Status(http.StatusOK). End(). 
- JSON(&app_state) + JSON(&appState) - if app_state.Status != types.ApplicationStatusALLOCATED { - t.Fatalf("Application Status is incorrect: %v", app_state.Status) + if appState.Status != types.ApplicationStatusALLOCATED { + t.Fatalf("Application Status is incorrect: %v", appState.Status) } }) @@ -136,10 +136,10 @@ drivers: Expect(r). Status(http.StatusOK). End(). - JSON(&app_state) + JSON(&appState) - if app_state.Status != types.ApplicationStatusDEALLOCATED { - r.Fatalf("Application Status is incorrect: %v", app_state.Status) + if appState.Status != types.ApplicationStatusDEALLOCATED { + r.Fatalf("Application Status is incorrect: %v", appState.Status) } }) }) diff --git a/tests/maintenance_mode_test.go b/tests/maintenance_mode_test.go index f6c2fe5..2ff4977 100644 --- a/tests/maintenance_mode_test.go +++ b/tests/maintenance_mode_test.go @@ -104,7 +104,7 @@ drivers: } }) - var app_state types.ApplicationState + var appState types.ApplicationState t.Run("Application should stay NEW for 20 sec", func(t *testing.T) { time.Sleep(20) @@ -115,10 +115,10 @@ drivers: Expect(t). Status(http.StatusOK). End(). - JSON(&app_state) + JSON(&appState) - if app_state.Status != types.ApplicationStatusNEW { - t.Fatalf("Application Status is incorrect: %v", app_state.Status) + if appState.Status != types.ApplicationStatusNEW { + t.Fatalf("Application Status is incorrect: %v", appState.Status) } }) @@ -142,10 +142,10 @@ drivers: Expect(r). Status(http.StatusOK). End(). 
- JSON(&app_state) + JSON(&appState) - if app_state.Status != types.ApplicationStatusALLOCATED { - r.Fatalf("Application Status is incorrect: %v", app_state.Status) + if appState.Status != types.ApplicationStatusALLOCATED { + r.Fatalf("Application Status is incorrect: %v", appState.Status) } }) }) diff --git a/tests/multiple_driver_instances_test.go b/tests/multiple_driver_instances_test.go index 500ab9f..dfe15bc 100644 --- a/tests/multiple_driver_instances_test.go +++ b/tests/multiple_driver_instances_test.go @@ -101,7 +101,7 @@ drivers: time.Sleep(10 * time.Second) - var app_state types.ApplicationState + var appState types.ApplicationState t.Run("Application should have state NEW in 10 sec", func(t *testing.T) { apitest.New(). EnableNetworking(cli). @@ -110,10 +110,10 @@ drivers: Expect(t). Status(http.StatusOK). End(). - JSON(&app_state) + JSON(&appState) - if app_state.Status != types.ApplicationStatusNEW { - t.Fatalf("Application Status is incorrect: %v", app_state.Status) + if appState.Status != types.ApplicationStatusNEW { + t.Fatalf("Application Status is incorrect: %v", appState.Status) } }) @@ -127,10 +127,10 @@ drivers: Expect(t). Status(http.StatusOK). End(). - JSON(&app_state) + JSON(&appState) - if app_state.Status != types.ApplicationStatusNEW { - t.Fatalf("Application Status is incorrect: %v", app_state.Status) + if appState.Status != types.ApplicationStatusNEW { + t.Fatalf("Application Status is incorrect: %v", appState.Status) } }) @@ -144,10 +144,10 @@ drivers: Expect(t). Status(http.StatusOK). End(). - JSON(&app_state) + JSON(&appState) - if app_state.Status != types.ApplicationStatusNEW { - t.Fatalf("Application Status is incorrect: %v", app_state.Status) + if appState.Status != types.ApplicationStatusNEW { + t.Fatalf("Application Status is incorrect: %v", appState.Status) } }) @@ -161,10 +161,10 @@ drivers: Expect(t). Status(http.StatusOK). End(). 
- JSON(&app_state) + JSON(&appState) - if app_state.Status != types.ApplicationStatusNEW { - t.Fatalf("Application Status is incorrect: %v", app_state.Status) + if appState.Status != types.ApplicationStatusNEW { + t.Fatalf("Application Status is incorrect: %v", appState.Status) } }) @@ -187,10 +187,10 @@ drivers: Expect(r). Status(http.StatusOK). End(). - JSON(&app_state) + JSON(&appState) - if app_state.Status != types.ApplicationStatusRECALLED { - r.Fatalf("Application Status is incorrect: %v", app_state.Status) + if appState.Status != types.ApplicationStatusRECALLED { + r.Fatalf("Application Status is incorrect: %v", appState.Status) } }) }) @@ -236,10 +236,10 @@ drivers: Expect(r). Status(http.StatusOK). End(). - JSON(&app_state) + JSON(&appState) - if app_state.Status != types.ApplicationStatusALLOCATED { - r.Fatalf("Application Status is incorrect: %v", app_state.Status) + if appState.Status != types.ApplicationStatusALLOCATED { + r.Fatalf("Application Status is incorrect: %v", appState.Status) } }) }) @@ -263,10 +263,10 @@ drivers: Expect(r). Status(http.StatusOK). End(). - JSON(&app_state) + JSON(&appState) - if app_state.Status != types.ApplicationStatusDEALLOCATED { - r.Fatalf("Application Status is incorrect: %v", app_state.Status) + if appState.Status != types.ApplicationStatusDEALLOCATED { + r.Fatalf("Application Status is incorrect: %v", appState.Status) } }) }) diff --git a/tests/node_filter_test.go b/tests/node_filter_test.go index 103a90b..50220d3 100644 --- a/tests/node_filter_test.go +++ b/tests/node_filter_test.go @@ -95,7 +95,7 @@ drivers: } }) - var app_state types.ApplicationState + var appState types.ApplicationState t.Run("Application should get ALLOCATED in 10 sec", func(t *testing.T) { h.Retry(&h.Timer{Timeout: 10 * time.Second, Wait: 1 * time.Second}, t, func(r *h.R) { apitest.New(). @@ -105,10 +105,10 @@ drivers: Expect(r). Status(http.StatusOK). End(). 
- JSON(&app_state) + JSON(&appState) - if app_state.Status != types.ApplicationStatusALLOCATED { - r.Fatalf("Application Status is incorrect: %v", app_state.Status) + if appState.Status != types.ApplicationStatusALLOCATED { + r.Fatalf("Application Status is incorrect: %v", appState.Status) } }) }) @@ -184,7 +184,7 @@ drivers: } }) - var app_state types.ApplicationState + var appState types.ApplicationState t.Run("Application should get ALLOCATED in 10 sec", func(t *testing.T) { h.Retry(&h.Timer{Timeout: 10 * time.Second, Wait: 1 * time.Second}, t, func(r *h.R) { apitest.New(). @@ -194,10 +194,10 @@ drivers: Expect(r). Status(http.StatusOK). End(). - JSON(&app_state) + JSON(&appState) - if app_state.Status != types.ApplicationStatusALLOCATED { - r.Fatalf("Application Status is incorrect: %v", app_state.Status) + if appState.Status != types.ApplicationStatusALLOCATED { + r.Fatalf("Application Status is incorrect: %v", appState.Status) } }) }) @@ -272,7 +272,7 @@ drivers: } }) - var app_state types.ApplicationState + var appState types.ApplicationState t.Run("Application should not get ALLOCATED in 10 sec", func(t *testing.T) { time.Sleep(10 * time.Second) @@ -283,10 +283,10 @@ drivers: Expect(t). Status(http.StatusOK). End(). - JSON(&app_state) + JSON(&appState) - if app_state.Status != types.ApplicationStatusNEW { - t.Fatalf("Application Status is incorrect: %v", app_state.Status) + if appState.Status != types.ApplicationStatusNEW { + t.Fatalf("Application Status is incorrect: %v", appState.Status) } }) } @@ -359,7 +359,7 @@ drivers: } }) - var app_state types.ApplicationState + var appState types.ApplicationState t.Run("Application should not get ALLOCATED in 10 sec", func(t *testing.T) { time.Sleep(10 * time.Second) @@ -370,10 +370,10 @@ drivers: Expect(t). Status(http.StatusOK). End(). 
- JSON(&app_state) + JSON(&appState) - if app_state.Status != types.ApplicationStatusNEW { - t.Fatalf("Application Status is incorrect: %v", app_state.Status) + if appState.Status != types.ApplicationStatusNEW { + t.Fatalf("Application Status is incorrect: %v", appState.Status) } }) } diff --git a/tests/shutdown_mode_test.go b/tests/shutdown_mode_test.go index 8e68b9a..031c9cf 100644 --- a/tests/shutdown_mode_test.go +++ b/tests/shutdown_mode_test.go @@ -94,7 +94,7 @@ drivers: } }) - var app_state types.ApplicationState + var appState types.ApplicationState t.Run("Application should get ALLOCATED in 10 sec", func(t *testing.T) { h.Retry(&h.Timer{Timeout: 10 * time.Second, Wait: 1 * time.Second}, t, func(r *h.R) { apitest.New(). @@ -104,10 +104,10 @@ drivers: Expect(r). Status(http.StatusOK). End(). - JSON(&app_state) + JSON(&appState) - if app_state.Status != types.ApplicationStatusALLOCATED { - r.Fatalf("Application Status is incorrect: %v", app_state.Status) + if appState.Status != types.ApplicationStatusALLOCATED { + r.Fatalf("Application Status is incorrect: %v", appState.Status) } }) }) @@ -215,7 +215,7 @@ drivers: } }) - var app_state types.ApplicationState + var appState types.ApplicationState t.Run("Application should get ALLOCATED in 10 sec", func(t *testing.T) { h.Retry(&h.Timer{Timeout: 10 * time.Second, Wait: 1 * time.Second}, t, func(r *h.R) { apitest.New(). @@ -225,10 +225,10 @@ drivers: Expect(r). Status(http.StatusOK). End(). 
- JSON(&app_state) + JSON(&appState) - if app_state.Status != types.ApplicationStatusALLOCATED { - r.Fatalf("Application Status is incorrect: %v", app_state.Status) + if appState.Status != types.ApplicationStatusALLOCATED { + r.Fatalf("Application Status is incorrect: %v", appState.Status) } }) }) @@ -321,7 +321,7 @@ drivers: } }) - var app_state types.ApplicationState + var appState types.ApplicationState t.Run("Application should get ALLOCATED in 10 sec", func(t *testing.T) { h.Retry(&h.Timer{Timeout: 10 * time.Second, Wait: 1 * time.Second}, t, func(r *h.R) { apitest.New(). @@ -331,10 +331,10 @@ drivers: Expect(r). Status(http.StatusOK). End(). - JSON(&app_state) + JSON(&appState) - if app_state.Status != types.ApplicationStatusALLOCATED { - r.Fatalf("Application Status is incorrect: %v", app_state.Status) + if appState.Status != types.ApplicationStatusALLOCATED { + r.Fatalf("Application Status is incorrect: %v", appState.Status) } }) }) @@ -436,7 +436,7 @@ drivers: } }) - var app_state types.ApplicationState + var appState types.ApplicationState t.Run("Application should get ALLOCATED in 10 sec", func(t *testing.T) { h.Retry(&h.Timer{Timeout: 10 * time.Second, Wait: 1 * time.Second}, t, func(r *h.R) { apitest.New(). @@ -446,10 +446,10 @@ drivers: Expect(r). Status(http.StatusOK). End(). - JSON(&app_state) + JSON(&appState) - if app_state.Status != types.ApplicationStatusALLOCATED { - r.Fatalf("Application Status is incorrect: %v", app_state.Status) + if appState.Status != types.ApplicationStatusALLOCATED { + r.Fatalf("Application Status is incorrect: %v", appState.Status) } }) }) @@ -503,10 +503,10 @@ drivers: Expect(r). Status(http.StatusOK). End(). 
- JSON(&app_state) + JSON(&appState) - if app_state.Status != types.ApplicationStatusDEALLOCATED { - r.Fatalf("Application Status is incorrect: %v", app_state.Status) + if appState.Status != types.ApplicationStatusDEALLOCATED { + r.Fatalf("Application Status is incorrect: %v", appState.Status) } }) }) diff --git a/tests/simple_app_create_destroy_test.go b/tests/simple_app_create_destroy_test.go index 3af26b2..01dd316 100644 --- a/tests/simple_app_create_destroy_test.go +++ b/tests/simple_app_create_destroy_test.go @@ -92,7 +92,7 @@ drivers: } }) - var app_state types.ApplicationState + var appState types.ApplicationState t.Run("Application should get ALLOCATED in 10 sec", func(t *testing.T) { h.Retry(&h.Timer{Timeout: 10 * time.Second, Wait: 1 * time.Second}, t, func(r *h.R) { apitest.New(). @@ -102,10 +102,10 @@ drivers: Expect(r). Status(http.StatusOK). End(). - JSON(&app_state) + JSON(&appState) - if app_state.Status != types.ApplicationStatusALLOCATED { - r.Fatalf("Application Status is incorrect: %v", app_state.Status) + if appState.Status != types.ApplicationStatusALLOCATED { + r.Fatalf("Application Status is incorrect: %v", appState.Status) } }) }) @@ -145,10 +145,10 @@ drivers: Expect(r). Status(http.StatusOK). End(). 
- JSON(&app_state) + JSON(&appState) - if app_state.Status != types.ApplicationStatusDEALLOCATED { - r.Fatalf("Application Status is incorrect: %v", app_state.Status) + if appState.Status != types.ApplicationStatusDEALLOCATED { + r.Fatalf("Application Status is incorrect: %v", appState.Status) } }) }) diff --git a/tests/three_apps_with_limit_fish_restart_test.go b/tests/three_apps_with_limit_fish_restart_test.go index 0c45282..22a24e6 100644 --- a/tests/three_apps_with_limit_fish_restart_test.go +++ b/tests/three_apps_with_limit_fish_restart_test.go @@ -131,7 +131,7 @@ drivers: } }) - var app_state types.ApplicationState + var appState types.ApplicationState t.Run("Application 1 should get ALLOCATED in 10 sec", func(t *testing.T) { h.Retry(&h.Timer{Timeout: 10 * time.Second, Wait: 1 * time.Second}, t, func(r *h.R) { apitest.New(). @@ -141,10 +141,10 @@ drivers: Expect(r). Status(http.StatusOK). End(). - JSON(&app_state) + JSON(&appState) - if app_state.Status != types.ApplicationStatusALLOCATED { - r.Fatalf("Application 1 Status is incorrect: %v", app_state.Status) + if appState.Status != types.ApplicationStatusALLOCATED { + r.Fatalf("Application 1 Status is incorrect: %v", appState.Status) } }) }) @@ -158,10 +158,10 @@ drivers: Expect(r). Status(http.StatusOK). End(). - JSON(&app_state) + JSON(&appState) - if app_state.Status != types.ApplicationStatusALLOCATED { - r.Fatalf("Application 2 Status is incorrect: %v", app_state.Status) + if appState.Status != types.ApplicationStatusALLOCATED { + r.Fatalf("Application 2 Status is incorrect: %v", appState.Status) } }) }) @@ -174,10 +174,10 @@ drivers: Expect(t). Status(http.StatusOK). End(). - JSON(&app_state) + JSON(&appState) - if app_state.Status != types.ApplicationStatusNEW { - t.Fatalf("Application 3 Status is incorrect: %v", app_state.Status) + if appState.Status != types.ApplicationStatusNEW { + t.Fatalf("Application 3 Status is incorrect: %v", appState.Status) } }) @@ -194,10 +194,10 @@ drivers: Expect(t). 
Status(http.StatusOK). End(). - JSON(&app_state) + JSON(&appState) - if app_state.Status != types.ApplicationStatusALLOCATED { - t.Fatalf("Application 1 Status is incorrect: %v", app_state.Status) + if appState.Status != types.ApplicationStatusALLOCATED { + t.Fatalf("Application 1 Status is incorrect: %v", appState.Status) } }) @@ -209,10 +209,10 @@ drivers: Expect(t). Status(http.StatusOK). End(). - JSON(&app_state) + JSON(&appState) - if app_state.Status != types.ApplicationStatusALLOCATED { - t.Fatalf("Application 2 Status is incorrect: %v", app_state.Status) + if appState.Status != types.ApplicationStatusALLOCATED { + t.Fatalf("Application 2 Status is incorrect: %v", appState.Status) } }) @@ -224,10 +224,10 @@ drivers: Expect(t). Status(http.StatusOK). End(). - JSON(&app_state) + JSON(&appState) - if app_state.Status != types.ApplicationStatusNEW { - t.Fatalf("Application 3 Status is incorrect: %v", app_state.Status) + if appState.Status != types.ApplicationStatusNEW { + t.Fatalf("Application 3 Status is incorrect: %v", appState.Status) } }) @@ -260,10 +260,10 @@ drivers: Expect(r). Status(http.StatusOK). End(). - JSON(&app_state) + JSON(&appState) - if app_state.Status != types.ApplicationStatusDEALLOCATED { - r.Fatalf("Application 1 Status is incorrect: %v", app_state.Status) + if appState.Status != types.ApplicationStatusDEALLOCATED { + r.Fatalf("Application 1 Status is incorrect: %v", appState.Status) } }) }) @@ -277,10 +277,10 @@ drivers: Expect(r). Status(http.StatusOK). End(). - JSON(&app_state) + JSON(&appState) - if app_state.Status != types.ApplicationStatusDEALLOCATED { - r.Fatalf("Application 2 Status is incorrect: %v", app_state.Status) + if appState.Status != types.ApplicationStatusDEALLOCATED { + r.Fatalf("Application 2 Status is incorrect: %v", appState.Status) } }) }) @@ -294,10 +294,10 @@ drivers: Expect(r). Status(http.StatusOK). End(). 
- JSON(&app_state) + JSON(&appState) - if app_state.Status != types.ApplicationStatusALLOCATED { - r.Fatalf("Application 3 Status is incorrect: %v", app_state.Status) + if appState.Status != types.ApplicationStatusALLOCATED { + r.Fatalf("Application 3 Status is incorrect: %v", appState.Status) } }) }) @@ -321,10 +321,10 @@ drivers: Expect(r). Status(http.StatusOK). End(). - JSON(&app_state) + JSON(&appState) - if app_state.Status != types.ApplicationStatusDEALLOCATED { - r.Fatalf("Application 3 Status is incorrect: %v", app_state.Status) + if appState.Status != types.ApplicationStatusDEALLOCATED { + r.Fatalf("Application 3 Status is incorrect: %v", appState.Status) } }) }) diff --git a/tests/three_apps_with_limit_test.go b/tests/three_apps_with_limit_test.go index 69820eb..829449a 100644 --- a/tests/three_apps_with_limit_test.go +++ b/tests/three_apps_with_limit_test.go @@ -129,7 +129,7 @@ drivers: } }) - var app_state types.ApplicationState + var appState types.ApplicationState t.Run("Application 1 should get ALLOCATED in 10 sec", func(t *testing.T) { h.Retry(&h.Timer{Timeout: 10 * time.Second, Wait: 1 * time.Second}, t, func(r *h.R) { apitest.New(). @@ -139,10 +139,10 @@ drivers: Expect(r). Status(http.StatusOK). End(). - JSON(&app_state) + JSON(&appState) - if app_state.Status != types.ApplicationStatusALLOCATED { - r.Fatalf("Application 1 Status is incorrect: %v", app_state.Status) + if appState.Status != types.ApplicationStatusALLOCATED { + r.Fatalf("Application 1 Status is incorrect: %v", appState.Status) } }) }) @@ -156,10 +156,10 @@ drivers: Expect(r). Status(http.StatusOK). End(). - JSON(&app_state) + JSON(&appState) - if app_state.Status != types.ApplicationStatusALLOCATED { - r.Fatalf("Application 2 Status is incorrect: %v", app_state.Status) + if appState.Status != types.ApplicationStatusALLOCATED { + r.Fatalf("Application 2 Status is incorrect: %v", appState.Status) } }) }) @@ -172,10 +172,10 @@ drivers: Expect(t). Status(http.StatusOK). End(). 
- JSON(&app_state) + JSON(&appState) - if app_state.Status != types.ApplicationStatusNEW { - t.Fatalf("Application 3 Status is incorrect: %v", app_state.Status) + if appState.Status != types.ApplicationStatusNEW { + t.Fatalf("Application 3 Status is incorrect: %v", appState.Status) } }) @@ -208,10 +208,10 @@ drivers: Expect(r). Status(http.StatusOK). End(). - JSON(&app_state) + JSON(&appState) - if app_state.Status != types.ApplicationStatusDEALLOCATED { - r.Fatalf("Application 1 Status is incorrect: %v", app_state.Status) + if appState.Status != types.ApplicationStatusDEALLOCATED { + r.Fatalf("Application 1 Status is incorrect: %v", appState.Status) } }) }) @@ -225,10 +225,10 @@ drivers: Expect(r). Status(http.StatusOK). End(). - JSON(&app_state) + JSON(&appState) - if app_state.Status != types.ApplicationStatusDEALLOCATED { - r.Fatalf("Application 2 Status is incorrect: %v", app_state.Status) + if appState.Status != types.ApplicationStatusDEALLOCATED { + r.Fatalf("Application 2 Status is incorrect: %v", appState.Status) } }) }) @@ -242,10 +242,10 @@ drivers: Expect(r). Status(http.StatusOK). End(). - JSON(&app_state) + JSON(&appState) - if app_state.Status != types.ApplicationStatusALLOCATED { - r.Fatalf("Application 3 Status is incorrect: %v", app_state.Status) + if appState.Status != types.ApplicationStatusALLOCATED { + r.Fatalf("Application 3 Status is incorrect: %v", appState.Status) } }) }) @@ -269,10 +269,10 @@ drivers: Expect(r). Status(http.StatusOK). End(). 
- JSON(&app_state) + JSON(&appState) - if app_state.Status != types.ApplicationStatusDEALLOCATED { - r.Fatalf("Application 3 Status is incorrect: %v", app_state.Status) + if appState.Status != types.ApplicationStatusDEALLOCATED { + r.Fatalf("Application 3 Status is incorrect: %v", appState.Status) } }) }) diff --git a/tests/two_apps_with_limit_test.go b/tests/two_apps_with_limit_test.go index f7b8ae4..b5ddea8 100644 --- a/tests/two_apps_with_limit_test.go +++ b/tests/two_apps_with_limit_test.go @@ -114,7 +114,7 @@ drivers: } }) - var app_state types.ApplicationState + var appState types.ApplicationState t.Run("Application 1 should get ALLOCATED in 10 sec", func(t *testing.T) { h.Retry(&h.Timer{Timeout: 10 * time.Second, Wait: 1 * time.Second}, t, func(r *h.R) { apitest.New(). @@ -124,10 +124,10 @@ drivers: Expect(r). Status(http.StatusOK). End(). - JSON(&app_state) + JSON(&appState) - if app_state.Status != types.ApplicationStatusALLOCATED { - r.Fatalf("Application 1 Status is incorrect: %v", app_state.Status) + if appState.Status != types.ApplicationStatusALLOCATED { + r.Fatalf("Application 1 Status is incorrect: %v", appState.Status) } }) }) @@ -140,10 +140,10 @@ drivers: Expect(t). Status(http.StatusOK). End(). - JSON(&app_state) + JSON(&appState) - if app_state.Status != types.ApplicationStatusNEW { - t.Fatalf("Application 2 Status is incorrect: %v", app_state.Status) + if appState.Status != types.ApplicationStatusNEW { + t.Fatalf("Application 2 Status is incorrect: %v", appState.Status) } }) @@ -182,10 +182,10 @@ drivers: Expect(r). Status(http.StatusOK). End(). - JSON(&app_state) + JSON(&appState) - if app_state.Status != types.ApplicationStatusDEALLOCATED { - r.Fatalf("Application 1 Status is incorrect: %v", app_state.Status) + if appState.Status != types.ApplicationStatusDEALLOCATED { + r.Fatalf("Application 1 Status is incorrect: %v", appState.Status) } }) }) @@ -199,10 +199,10 @@ drivers: Expect(r). Status(http.StatusOK). End(). 
- JSON(&app_state) + JSON(&appState) - if app_state.Status != types.ApplicationStatusALLOCATED { - r.Fatalf("Application 2 Status is incorrect: %v", app_state.Status) + if appState.Status != types.ApplicationStatusALLOCATED { + r.Fatalf("Application 2 Status is incorrect: %v", appState.Status) } }) }) @@ -241,10 +241,10 @@ drivers: Expect(r). Status(http.StatusOK). End(). - JSON(&app_state) + JSON(&appState) - if app_state.Status != types.ApplicationStatusDEALLOCATED { - r.Fatalf("Application 2 Status is incorrect: %v", app_state.Status) + if appState.Status != types.ApplicationStatusDEALLOCATED { + r.Fatalf("Application 2 Status is incorrect: %v", appState.Status) } }) }) From 9cd33d10bbe099a2db7842863f39fcbfd0d1b394 Mon Sep 17 00:00:00 2001 From: Sergei Parshev Date: Fri, 6 Sep 2024 15:50:10 -0400 Subject: [PATCH 3/6] Fixed issue with code generation before running static checks --- .github/workflows/main.yml | 5 +++++ build.sh | 3 +++ 2 files changed, 8 insertions(+) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 30ad651..b560b32 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -56,9 +56,14 @@ jobs: checks: write steps: - uses: actions/checkout@v4 + - uses: actions/setup-go@v5 with: go-version: stable # Linter will use go.mod file to adjust the rules properly + + - name: Generate code + run: ONLYGEN=1 ./build.sh + - name: golangci-lint uses: golangci/golangci-lint-action@v6 with: diff --git a/build.sh b/build.sh index caa78a7..c1728e6 100755 --- a/build.sh +++ b/build.sh @@ -39,6 +39,9 @@ PATH="$gopath/bin:$PATH" go generate -v ./lib/... 
sed -i.bak 's/^type LabelDefinitions = /type LabelDefinitions /' lib/openapi/types/types.gen.go rm -f lib/openapi/types/types.gen.go.bak +# If ONLYGEN is specified - skip the build +[ -z "$ONLYGEN" ] || exit 0 + # Prepare version number as overrides during link mod_name=$(grep '^module' "${root_dir}/go.mod" | cut -d' ' -f 2) git_version="$(git describe --tags --match 'v*')$([ "$(git diff)" = '' ] || echo '-dirty')" From 5653d02ca81d823041ccf989351f720ecd3c927f Mon Sep 17 00:00:00 2001 From: Sergei Parshev Date: Sat, 7 Sep 2024 00:53:33 -0400 Subject: [PATCH 4/6] Completed huge refactoring by revive --- .golangci.yml | 32 +++++- cmd/fish/fish.go | 33 +++--- docs/openapi.yaml | 12 +- lib/build/build.go | 3 + lib/crypt/crypt.go | 18 +-- lib/crypt/init_tls_pair_ca.go | 28 ++--- lib/drivers/aws/config.go | 8 +- lib/drivers/aws/dedicated_pool.go | 94 +++++++-------- lib/drivers/aws/driver.go | 78 +++++++------ lib/drivers/aws/options.go | 21 ++-- lib/drivers/aws/task_image.go | 36 +++--- lib/drivers/aws/task_snapshot.go | 10 +- lib/drivers/aws/util.go | 80 ++++++------- lib/drivers/docker/config.go | 21 ++-- lib/drivers/docker/driver.go | 73 ++++++------ lib/drivers/docker/options.go | 25 ++-- lib/drivers/docker/util.go | 42 +++---- lib/drivers/driver.go | 6 +- lib/drivers/image.go | 30 ++--- lib/drivers/image_test.go | 48 ++++---- lib/drivers/native/config.go | 107 +++++++----------- lib/drivers/native/driver.go | 61 +++++----- lib/drivers/native/options.go | 33 +++--- lib/drivers/native/util.go | 32 +++--- lib/drivers/task.go | 1 + lib/drivers/test/config.go | 12 +- lib/drivers/test/driver.go | 49 ++++---- lib/drivers/test/options.go | 5 +- .../test/{tasks.go => tasks_snapshot.go} | 9 +- lib/drivers/vmx/config.go | 22 ++-- lib/drivers/vmx/driver.go | 81 +++++++------ lib/drivers/vmx/options.go | 25 ++-- lib/drivers/vmx/util.go | 30 ++--- lib/fish/application.go | 12 +- lib/fish/application_state.go | 12 +- lib/fish/application_task.go | 15 ++- lib/fish/config.go | 11 
+- lib/fish/drivers.go | 11 +- lib/fish/fish.go | 59 ++++++---- lib/fish/label.go | 6 +- lib/fish/location.go | 13 +-- lib/fish/node.go | 20 ++-- lib/fish/resource.go | 35 +++--- lib/fish/resource_access.go | 9 +- lib/fish/servicemapping.go | 7 +- lib/fish/user.go | 9 +- lib/fish/vote.go | 27 +++-- lib/log/log.go | 91 +++++++++------ lib/openapi/api/api_v1.go | 73 +++++++++--- lib/openapi/meta/meta_v1.go | 17 ++- lib/openapi/openapi.go | 14 ++- lib/openapi/types/authentication.go | 6 +- lib/openapi/types/label_definitions.go | 5 +- lib/openapi/types/node.go | 5 +- lib/openapi/types/node_definition.go | 6 +- lib/openapi/types/resources.go | 12 +- lib/{proxy_socks => proxysocks}/proxy.go | 18 ++- lib/{proxy_ssh => proxyssh}/proxy.go | 15 ++- lib/util/contains.go | 2 + lib/util/dot_serialize.go | 2 +- lib/util/duration.go | 5 +- lib/util/expression_sql_filter.go | 6 +- lib/util/expression_sql_filter_test.go | 24 ++-- lib/util/file_copy.go | 1 + lib/util/file_replace_block.go | 7 +- lib/util/file_replace_token.go | 34 +++--- lib/util/file_starts_with.go | 13 ++- lib/util/file_starts_with_test.go | 12 +- lib/util/human_size.go | 13 ++- lib/util/lock.go | 6 +- lib/util/metadata_processing.go | 2 +- lib/util/passthrough_monitor.go | 2 +- lib/util/streamlog_monitor.go | 12 +- lib/util/unparsed_json.go | 15 ++- tests/allocate_apps_stress_test.go | 4 +- tests/allocate_multidefinition_label_test.go | 36 +++--- .../application_task_notexisting_fail_test.go | 14 +-- .../application_task_snapshot_by_user_test.go | 22 ++-- tests/cant_allocate_too_big_label_test.go | 16 +-- tests/default_lifetime_timeout_test.go | 10 +- ...nerated_uids_prefix_is_node_prefix_test.go | 14 +-- tests/helper/copy.go | 3 +- tests/helper/fish.go | 40 +++---- tests/helper/retry.go | 20 +++- tests/helper/t_mock.go | 8 +- tests/json_label_create_test.go | 2 +- tests/label_find_filter_sql_injection_test.go | 18 +-- tests/label_lifetime_timeout_test.go | 10 +- ...overrides_default_lifetime_timeout_test.go | 
10 +- tests/maintenance_mode_test.go | 12 +- tests/multiple_driver_instances_test.go | 26 ++--- tests/node_filter_test.go | 24 ++-- tests/shutdown_mode_test.go | 44 +++---- tests/simple_app_create_destroy_test.go | 12 +- ...three_apps_with_limit_fish_restart_test.go | 34 +++--- tests/three_apps_with_limit_test.go | 28 ++--- tests/two_apps_with_limit_test.go | 24 ++-- tests/yaml_label_create_test.go | 2 +- 98 files changed, 1274 insertions(+), 988 deletions(-) rename lib/drivers/test/{tasks.go => tasks_snapshot.go} (91%) rename lib/{proxy_socks => proxysocks}/proxy.go (73%) rename lib/{proxy_ssh => proxyssh}/proxy.go (96%) diff --git a/.golangci.yml b/.golangci.yml index 415447a..35dd414 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -8,6 +8,9 @@ run: output: show-stats: true +issues: + max-issues-per-linter: 0 # unlimited + linters: # Disable all linters. # Default: false @@ -95,7 +98,7 @@ linters: - promlinter - protogetter - reassign - #- revive # not supporting snake_case for vars + - revive - rowserrcheck - sloglint - spancheck @@ -126,3 +129,30 @@ linters-settings: gosec: excludes: - G115 # integer overflow conversion - disabled due to found no proper way to fix those + revive: + enable-all-rules: true + rules: + - name: comment-spacings + disabled: true + - name: line-length-limit + disabled: true + - name: add-constant + disabled: true + - name: unhandled-error + disabled: true + - name: cognitive-complexity + disabled: true + - name: bare-return + disabled: true + - name: modifies-value-receiver + disabled: true + - name: cyclomatic + disabled: true + - name: confusing-results + disabled: true + - name: function-length + disabled: true + - name: flag-parameter + disabled: true + - name: max-control-nesting + disabled: true diff --git a/cmd/fish/fish.go b/cmd/fish/fish.go index b1a65bb..7ad22f5 100644 --- a/cmd/fish/fish.go +++ b/cmd/fish/fish.go @@ -10,6 +10,7 @@ * governing permissions and limitations under the License. 
*/ +// Starting point for fish cmd package main import ( @@ -31,8 +32,8 @@ import ( "github.com/adobe/aquarium-fish/lib/fish" "github.com/adobe/aquarium-fish/lib/log" "github.com/adobe/aquarium-fish/lib/openapi" - "github.com/adobe/aquarium-fish/lib/proxy_socks" - "github.com/adobe/aquarium-fish/lib/proxy_ssh" + "github.com/adobe/aquarium-fish/lib/proxysocks" + "github.com/adobe/aquarium-fish/lib/proxyssh" "github.com/adobe/aquarium-fish/lib/util" ) @@ -41,7 +42,7 @@ func main() { var apiAddress string var proxySocksAddress string - var proxySshAddress string + var proxySSHAddress string var nodeAddress string var cfgPath string var dir string @@ -54,7 +55,7 @@ func main() { Use: "aquarium-fish", Short: "Aquarium fish", Long: `Part of the Aquarium suite - a distributed resources manager`, - PersistentPreRunE: func(cmd *cobra.Command, args []string) (err error) { + PersistentPreRunE: func(_ /*cmd*/ *cobra.Command, _ /*args*/ []string) (err error) { if err = log.SetVerbosity(logVerbosity); err != nil { return err } @@ -62,7 +63,7 @@ func main() { return log.InitLoggers() }, - RunE: func(cmd *cobra.Command, args []string) (err error) { + RunE: func(_ /*cmd*/ *cobra.Command, _ /*args*/ []string) (err error) { log.Info("Fish init...") cfg := &fish.Config{} @@ -75,8 +76,8 @@ func main() { if proxySocksAddress != "" { cfg.ProxySocksAddress = proxySocksAddress } - if proxySshAddress != "" { - cfg.ProxySshAddress = proxySshAddress + if proxySSHAddress != "" { + cfg.ProxySSHAddress = proxySSHAddress } if nodeAddress != "" { cfg.NodeAddress = nodeAddress @@ -89,7 +90,7 @@ func main() { if err != nil { return log.Errorf("Fish: Unable to parse cpu limit value: %v", err) } - cfg.CpuLimit = uint16(val) + cfg.CPULimit = uint16(val) } if memTarget != "" { if cfg.MemTarget, err = util.NewHumanSize(memTarget); err != nil { @@ -98,9 +99,9 @@ func main() { } // Set Fish Node resources limits - if cfg.CpuLimit > 0 { - log.Info("Fish CPU limited:", cfg.CpuLimit) - 
runtime.GOMAXPROCS(int(cfg.CpuLimit)) + if cfg.CPULimit > 0 { + log.Info("Fish CPU limited:", cfg.CPULimit) + runtime.GOMAXPROCS(int(cfg.CPULimit)) } if cfg.MemTarget > 0 { log.Info("Fish MEM targeted:", cfg.MemTarget.String()) @@ -125,13 +126,13 @@ func main() { if !filepath.IsAbs(certPath) { certPath = filepath.Join(cfg.Directory, certPath) } - if err = crypt.InitTlsPairCa([]string{cfg.NodeName, cfg.NodeAddress}, caPath, keyPath, certPath); err != nil { + if err = crypt.InitTLSPairCa([]string{cfg.NodeName, cfg.NodeAddress}, caPath, keyPath, certPath); err != nil { return err } log.Info("Fish starting ORM...") db, err := gorm.Open(sqlite.Open(filepath.Join(dir, "sqlite.db")), &gorm.Config{ - Logger: logger.New(log.ErrorLogger, logger.Config{ + Logger: logger.New(log.GetErrorLogger(), logger.Config{ SlowThreshold: 500 * time.Millisecond, LogLevel: logger.Error, IgnoreRecordNotFoundError: true, @@ -154,7 +155,7 @@ func main() { } log.Info("Fish starting socks5 proxy...") - err = proxy_socks.Init(fish, cfg.ProxySocksAddress) + err = proxysocks.Init(fish, cfg.ProxySocksAddress) if err != nil { return err } @@ -164,7 +165,7 @@ func main() { if !filepath.IsAbs(idRsaPath) { idRsaPath = filepath.Join(cfg.Directory, idRsaPath) } - err = proxy_ssh.Init(fish, idRsaPath, cfg.ProxySshAddress) + err = proxyssh.Init(fish, idRsaPath, cfg.ProxySSHAddress) if err != nil { return err } @@ -199,7 +200,7 @@ func main() { flags := cmd.Flags() flags.StringVarP(&apiAddress, "api", "a", "", "address used to expose the fish API") flags.StringVar(&proxySocksAddress, "socks_proxy", "", "address used to expose the SOCKS5 proxy") - flags.StringVar(&proxySshAddress, "ssh_proxy", "", "address used to expose the SSH proxy") + flags.StringVar(&proxySSHAddress, "ssh_proxy", "", "address used to expose the SSH proxy") flags.StringVarP(&nodeAddress, "node", "n", "", "node external endpoint to connect to tell the other nodes") flags.StringVarP(&cfgPath, "cfg", "c", "", "yaml configuration file") 
flags.StringVarP(&dir, "dir", "D", "", "database and other fish files directory") diff --git a/docs/openapi.yaml b/docs/openapi.yaml index 57b16f2..9c986a7 100644 --- a/docs/openapi.yaml +++ b/docs/openapi.yaml @@ -1101,7 +1101,7 @@ components: x-oapi-codegen-extra-tags: yaml: label_UID metadata: - x-go-type: util.UnparsedJson + x-go-type: util.UnparsedJSON description: Additional metadata in JSON format (can't override Label metadata) example: JENKINS_URL: 'http://172.16.1.1:8085/' @@ -1203,10 +1203,10 @@ components: Used to specify when the task should be executed, right now only ALLOCATED, DEALLOCATE and RECALLED (when app is already here) are supported. options: - x-go-type: util.UnparsedJson + x-go-type: util.UnparsedJSON description: JSON object with additional options result: - x-go-type: util.UnparsedJson + x-go-type: util.UnparsedJSON description: JSON object with the results of task execution UserName: @@ -1322,7 +1322,7 @@ components: $ref: '#/components/schemas/Resources' description: Resources Driver need to provide for the Label execution options: - x-go-type: util.UnparsedJson + x-go-type: util.UnparsedJSON description: Driver-specific options to execute the environment authentication: $ref: '#/components/schemas/Authentication' @@ -1376,7 +1376,7 @@ components: definitions: $ref: '#/components/schemas/LabelDefinitions' metadata: - x-go-type: util.UnparsedJson + x-go-type: util.UnparsedJSON description: Basic metadata to pass to the Resource example: JENKINS_AGENT_WORKSPACE: D:\ @@ -1620,7 +1620,7 @@ components: MAC or any other type of network address which will allow to properly identify the node through network interaction. metadata: - x-go-type: util.UnparsedJson + x-go-type: util.UnparsedJSON description: > Combined Application and Label metadata (in this order) to make it available through `Meta API` to the resource. 
diff --git a/lib/build/build.go b/lib/build/build.go index a4c3ad5..a46f5b6 100644 --- a/lib/build/build.go +++ b/lib/build/build.go @@ -10,8 +10,11 @@ * governing permissions and limitations under the License. */ +// Package build contains version of the binary & time when it was built package build +// Version of the build var Version = "v0.0.0-dev" +// Time of the build var Time string diff --git a/lib/crypt/crypt.go b/lib/crypt/crypt.go index d8bb6ac..ce3fed0 100644 --- a/lib/crypt/crypt.go +++ b/lib/crypt/crypt.go @@ -10,6 +10,7 @@ * governing permissions and limitations under the License. */ +// Package crypt contains a number of cryptographic functions package crypt import ( @@ -22,6 +23,7 @@ import ( "github.com/adobe/aquarium-fish/lib/log" ) +// Default parameters for the Argon2 hashing and some charsets usable for representing the data const ( Argon2Algo = "Argon2id" // Default tuned to process at least 20 API requests/sec on 2CPU @@ -43,6 +45,7 @@ const ( RandStringCharsetAZ = "abcdefghijklmnopqrstuvwxyz" // Only a-z ) +// Hash contains everything needed for storing and reproducing password hash type Hash struct { Algo string Prop properties `gorm:"embedded;embeddedPrefix:prop_"` @@ -57,7 +60,7 @@ type properties struct { Threads uint8 } -// Create random bytes of specified size +// RandBytes create random bytes of specified size func RandBytes(size int) (data []byte) { data = make([]byte, size) if _, err := rand.Read(data); err != nil { @@ -66,12 +69,12 @@ func RandBytes(size int) (data []byte) { return } -// By default use base58 +// RandString generates random string with base58 characters func RandString(size int) string { return RandStringCharset(size, RandStringCharsetB58) } -// Create random string of specified size +// RandStringCharset creates random string of specified size func RandStringCharset(size int, charset string) string { data := make([]byte, size) charsetLen := big.NewInt(int64(len(charset))) @@ -85,7 +88,7 @@ func 
RandStringCharset(size int, charset string) string { return string(data) } -// Generate a salted hash for the input string with default parameters +// NewHash generates a salted hash for the input string with default parameters func NewHash(input string, salt []byte) (h Hash) { h.Algo = Argon2Algo if salt != nil { @@ -103,7 +106,7 @@ func NewHash(input string, salt []byte) (h Hash) { return } -// Check the input equal to the current hashed one +// IsEqual checks the input equal to the current hashed one func (h *Hash) IsEqual(input string) bool { if h.Algo == v074Argon2Algo { // Legacy low-performant parameters, not defined in hash @@ -115,6 +118,7 @@ func (h *Hash) IsEqual(input string) bool { return bytes.Equal(h.Hash, argon2.IDKey([]byte(input), h.Salt, h.Prop.Iterations, h.Prop.Memory, h.Prop.Threads, uint32(len(h.Hash)))) } -func (hash *Hash) IsEmpty() bool { - return hash.Algo == "" +// IsEmpty shows is the hash is actually not filled with data +func (h *Hash) IsEmpty() bool { + return h.Algo == "" } diff --git a/lib/crypt/init_tls_pair_ca.go b/lib/crypt/init_tls_pair_ca.go index da34da3..556f39c 100644 --- a/lib/crypt/init_tls_pair_ca.go +++ b/lib/crypt/init_tls_pair_ca.go @@ -29,7 +29,8 @@ import ( "time" ) -func InitTlsPairCa(hosts []string, caPath, keyPath, crtPath string) error { +// InitTLSPairCa creates a pair of asymmetric keys and CA if needed +func InitTLSPairCa(hosts []string, caPath, keyPath, crtPath string) error { // Generates simple CA and Node certificate signed by the CA _, caErr := os.Stat(caPath) if os.IsNotExist(caErr) { @@ -98,22 +99,20 @@ func generateSimpleCa(keyPath, crtPath string) error { } // Create private key file - if err := createKey(keyPath, priv); err != nil { - return err - } + err = createKey(keyPath, priv) - return nil + return err } func generateSimpleKeyCert(hosts []string, keyPath, crtPath, caPath string) error { // Load the CA key and cert - caTls, err := tls.LoadX509KeyPair(caPath, getCaKeyFromCertPath(caPath)) + 
caTLS, err := tls.LoadX509KeyPair(caPath, getCaKeyFromCertPath(caPath)) if err != nil { return err } - caKey := caTls.PrivateKey + caKey := caTLS.PrivateKey - caCrt, err := x509.ParseCertificate(caTls.Certificate[0]) + caCrt, err := x509.ParseCertificate(caTLS.Certificate[0]) if err != nil { return err } @@ -165,11 +164,9 @@ func generateSimpleKeyCert(hosts []string, keyPath, crtPath, caPath string) erro } // Create private key file - if err := createKey(keyPath, priv); err != nil { - return err - } + err = createKey(keyPath, priv) - return nil + return err } func createCert(crtPath string, pubkey crypto.PublicKey, caKey crypto.PrivateKey, cert, caCrt *x509.Certificate) error { @@ -210,9 +207,8 @@ func createKey(keyPath string, key crypto.PrivateKey) error { if err != nil { return err } - if err := pem.Encode(keyOut, &pem.Block{Type: "PRIVATE KEY", Bytes: privBytes}); err != nil { - return err - } - return nil + err = pem.Encode(keyOut, &pem.Block{Type: "PRIVATE KEY", Bytes: privBytes}) + + return err } diff --git a/lib/drivers/aws/config.go b/lib/drivers/aws/config.go index ed3423e..210de82 100644 --- a/lib/drivers/aws/config.go +++ b/lib/drivers/aws/config.go @@ -10,6 +10,7 @@ * governing permissions and limitations under the License. 
*/ +// Package aws implements driver package aws import ( @@ -25,6 +26,7 @@ import ( "github.com/adobe/aquarium-fish/lib/util" ) +// Config - node driver configuration type Config struct { Region string `json:"region"` // AWS Region to connect to KeyID string `json:"key_id"` // AWS AMI Key ID @@ -43,7 +45,7 @@ type Config struct { ImageCreateWait util.Duration `json:"image_create_wait"` // Maximum wait time for image availability (create/copy), default: 2h } -// Stores the configuration of AWS dedicated pool of particular type to manage +// DedicatedPoolRecord stores the configuration of AWS dedicated pool of particular type to manage // aws ec2 allocate-hosts --availability-zone "us-west-2c" --auto-placement "on" --host-recovery "off" --host-maintenance "off" --quantity 1 --instance-type "mac2.metal" type DedicatedPoolRecord struct { Type string `json:"type"` // Instance type handled by the dedicated hosts pool (example: "mac2.metal") @@ -80,6 +82,7 @@ type DedicatedPoolRecord struct { ScrubbingDelay util.Duration `json:"scrubbing_delay"` } +// Apply takes json and applies it to the config structure func (c *Config) Apply(config []byte) error { // Parse json if len(config) > 0 { @@ -91,6 +94,7 @@ func (c *Config) Apply(config []byte) error { return nil } +// Validate makes sure the config have the required defaults & that the required fields are set func (c *Config) Validate() (err error) { // Check that values of the config is filled at least with defaults if c.Region == "" { @@ -107,7 +111,7 @@ func (c *Config) Validate() (err error) { // Verify that connection is possible with those creds and get the account ID conn := sts.NewFromConfig(aws.Config{ Region: c.Region, - Credentials: aws.CredentialsProviderFunc(func(ctx context.Context) (aws.Credentials, error) { + Credentials: aws.CredentialsProviderFunc(func(_ /*ctx*/ context.Context) (aws.Credentials, error) { return aws.Credentials{ AccessKeyID: c.KeyID, SecretAccessKey: c.SecretKey, diff --git 
a/lib/drivers/aws/dedicated_pool.go b/lib/drivers/aws/dedicated_pool.go index 92d6175..10e6903 100644 --- a/lib/drivers/aws/dedicated_pool.go +++ b/lib/drivers/aws/dedicated_pool.go @@ -27,7 +27,7 @@ import ( "github.com/adobe/aquarium-fish/lib/log" ) -// Custom status to set in the host for simplifying parallel ops in between the updates +// HostReserved - custom status to set in the host for simplifying parallel ops in between the updates const HostReserved = "reserved" // TODO: Right now logic pinned to just one node, need to be distributed @@ -41,8 +41,8 @@ type dedicatedPoolWorker struct { // Amount of instances per dedicated host used in capacity calculations instancesPerHost uint - // It's better to update active_hosts by calling updateDedicatedHosts() - active_hosts map[string]ec2types.Host + // It's better to update activeHosts by calling updateDedicatedHosts() + activeHosts map[string]ec2types.Host activeHostsUpdated time.Time activeHostsMu sync.RWMutex @@ -57,8 +57,8 @@ func (d *Driver) newDedicatedPoolWorker(name string, record DedicatedPoolRecord) driver: d, record: record, - active_hosts: make(map[string]ec2types.Host), - toManageAt: make(map[string]time.Time), + activeHosts: make(map[string]ec2types.Host), + toManageAt: make(map[string]time.Time), } // Receiving amount of instances per dedicated host @@ -89,13 +89,13 @@ func (w *dedicatedPoolWorker) AvailableCapacity(instanceType string) int64 { // Looking for the available hosts in the list and their capacity w.activeHostsMu.RLock() defer w.activeHostsMu.RUnlock() - for _, host := range w.active_hosts { + for _, host := range w.activeHosts { // For now support only single-type dedicated hosts, because primary goal is mac machines instCount += int64(getHostCapacity(&host)) } // Let's add the amount of instances we can allocate - instCount += (int64(w.record.Max) - int64(len(w.active_hosts))) * int64(w.instancesPerHost) + instCount += (int64(w.record.Max) - int64(len(w.activeHosts))) * 
int64(w.instancesPerHost) log.Debugf("AWS: dedicated %q: AvailableCapacity for dedicated host type %q: %d", w.name, w.record.Type, instCount) @@ -116,9 +116,9 @@ func (w *dedicatedPoolWorker) ReserveHost(instanceType string) string { var availableHosts []string // Look for the hosts with capacity - for hostId, host := range w.active_hosts { + for hostID, host := range w.activeHosts { if getHostCapacity(&host) > 0 { - availableHosts = append(availableHosts, hostId) + availableHosts = append(availableHosts, hostID) } } @@ -128,10 +128,10 @@ func (w *dedicatedPoolWorker) ReserveHost(instanceType string) string { } // Pick random one from the list of available hosts to reduce the possibility of conflict - host := w.active_hosts[availableHosts[rand.Intn(len(availableHosts))]] // #nosec G404 + host := w.activeHosts[availableHosts[rand.Intn(len(availableHosts))]] // #nosec G404 // Mark it as reserved temporary to ease multi-allocation at the same time host.State = HostReserved - w.active_hosts[aws.ToString(host.HostId)] = host + w.activeHosts[aws.ToString(host.HostId)] = host return aws.ToString(host.HostId) } @@ -142,7 +142,7 @@ func (w *dedicatedPoolWorker) AllocateHost(instanceType string) string { return "" } - currActiveHosts := len(w.active_hosts) + currActiveHosts := len(w.activeHosts) if w.record.Max <= uint(currActiveHosts) { log.Warnf("AWS: dedicated %q: Unable to request new host due to reached the maximum limit: %d <= %d", w.name, w.record.Max, currActiveHosts) return "" @@ -233,30 +233,30 @@ func (w *dedicatedPoolWorker) manageHosts() []string { var toRelease []string // Going through the process list - for hostId, timeout := range w.toManageAt { - if host, ok := w.active_hosts[hostId]; !ok || isHostUsed(&host) { + for hostID, timeout := range w.toManageAt { + if host, ok := w.activeHosts[hostID]; !ok || isHostUsed(&host) { // The host is disappeared or used, we don't need to manage it out anymore - toClean = append(toClean, hostId) + toClean = 
append(toClean, hostID) continue } // Host seems still exists and not used - check for timeout if timeout.Before(time.Now()) { // Timeout for the host reached - let's put it in the release bucket - toRelease = append(toRelease, hostId) + toRelease = append(toRelease, hostID) } } // Cleaning up the manage list - for _, hostId := range toClean { - delete(w.toManageAt, hostId) + for _, hostID := range toClean { + delete(w.toManageAt, hostID) } // Going through the active hosts and updating to_manage list - for hostId, host := range w.active_hosts { + for hostID, host := range w.activeHosts { if host.State == ec2types.AllocationStatePermanentFailure { // Immediately release - we don't need failed hosts in our pool - toRelease = append(toRelease, hostId) + toRelease = append(toRelease, hostID) } // We don't need to manage out the hosts in use @@ -272,7 +272,7 @@ func (w *dedicatedPoolWorker) manageHosts() []string { // Skipping the hosts that already in managed list found := false for hid := range w.toManageAt { - if hostId == hid { + if hostID == hid { found = true break } @@ -284,11 +284,11 @@ func (w *dedicatedPoolWorker) manageHosts() []string { // Check if mac - giving it some time before action release or scrubbing // If not mac or mac is old: giving a chance to be reused - will be processed next cycle if isHostMac(&host) && !isMacTooOld(&host) { - w.toManageAt[hostId] = time.Now().Add(time.Duration(w.record.ScrubbingDelay)) + w.toManageAt[hostID] = time.Now().Add(time.Duration(w.record.ScrubbingDelay)) } else { - w.toManageAt[hostId] = time.Now() + w.toManageAt[hostID] = time.Now() } - log.Debugf("AWS: dedicated %q: Added new host to be managed out: %q at %q", w.name, hostId, w.toManageAt[hostId]) + log.Debugf("AWS: dedicated %q: Added new host to be managed out: %q at %q", w.name, hostID, w.toManageAt[hostID]) } return toRelease @@ -309,12 +309,12 @@ func (w *dedicatedPoolWorker) releaseHosts(releaseHosts []string) { // Check if there are macs which need a 
special treatment var macHosts []string var toRelease []string - for _, hostId := range releaseHosts { + for _, hostID := range releaseHosts { // Special treatment for mac hosts - it makes not much sense to try to release them until // they've live for 24h due to Apple-AWS license. - if host, ok := w.active_hosts[hostId]; ok && host.HostProperties != nil { + if host, ok := w.activeHosts[hostID]; ok && host.HostProperties != nil { if isHostMac(&host) { - macHosts = append(macHosts, hostId) + macHosts = append(macHosts, hostID) // If mac host not reached 24h since allocation - skipping addition to the release list if !isHostReadyForRelease(&host) { continue @@ -322,7 +322,7 @@ func (w *dedicatedPoolWorker) releaseHosts(releaseHosts []string) { } } // Adding any host to to_release list - toRelease = append(toRelease, hostId) + toRelease = append(toRelease, hostID) } // Run the release process for multiple hosts @@ -333,21 +333,21 @@ func (w *dedicatedPoolWorker) releaseHosts(releaseHosts []string) { } // Cleanup the released hosts from the active hosts list - for _, hostId := range toRelease { + for _, hostID := range toRelease { // Skipping if release of the host failed for some reason - for _, failedHostId := range releaseFailed { - if failedHostId == hostId { + for _, failedHostID := range releaseFailed { + if failedHostID == hostID { continue } } - delete(w.active_hosts, hostId) + delete(w.activeHosts, hostID) } // Scrubbing the rest of mac hosts if len(macHosts) > 0 && w.record.ScrubbingDelay != 0 { - for _, hostId := range macHosts { - host, ok := w.active_hosts[hostId] + for _, hostID := range macHosts { + host, ok := w.activeHosts[hostID] if !ok || host.State == ec2types.AllocationStatePending { // The host was released or already in scrubbing - skipping it continue @@ -355,16 +355,16 @@ func (w *dedicatedPoolWorker) releaseHosts(releaseHosts []string) { // Reserve the host internally for scrubbing process to prevent allocation issues host.State = HostReserved 
- w.active_hosts[aws.ToString(host.HostId)] = host + w.activeHosts[aws.ToString(host.HostId)] = host // Triggering the scrubbing process - if err := w.driver.triggerHostScrubbing(hostId, aws.ToString(host.HostProperties.InstanceType)); err != nil { - log.Errorf("AWS: dedicated %q: Unable to run scrubbing for host %q: %v", w.name, hostId, err) + if err := w.driver.triggerHostScrubbing(hostID, aws.ToString(host.HostProperties.InstanceType)); err != nil { + log.Errorf("AWS: dedicated %q: Unable to run scrubbing for host %q: %v", w.name, hostID, err) continue } // Removing the host from the list - delete(w.active_hosts, hostId) + delete(w.activeHosts, hostID) } } } @@ -489,11 +489,11 @@ func (w *dedicatedPoolWorker) updateDedicatedHosts() error { } for _, rh := range resp.Hosts { - hostId := aws.ToString(rh.HostId) - currActiveHosts[hostId] = rh + hostID := aws.ToString(rh.HostId) + currActiveHosts[hostID] = rh // If the response host has not changed, use the same object in the active list - if ah, ok := w.active_hosts[hostId]; ok && ah.State == rh.State && len(ah.Instances) == len(rh.Instances) { - currActiveHosts[hostId] = w.active_hosts[hostId] + if ah, ok := w.activeHosts[hostID]; ok && ah.State == rh.State && len(ah.Instances) == len(rh.Instances) { + currActiveHosts[hostID] = w.activeHosts[hostID] } } } @@ -503,13 +503,13 @@ func (w *dedicatedPoolWorker) updateDedicatedHosts() error { defer w.activeHostsMu.Unlock() w.activeHostsUpdated = time.Now() - w.active_hosts = currActiveHosts + w.activeHosts = currActiveHosts // Printing list for debug purposes - if log.Verbosity == 1 { - log.Debugf("AWS: dedicated %q: Amount of active hosts in pool: %d", w.name, len(w.active_hosts)) - for hostId, host := range w.active_hosts { - log.Debugf("AWS: dedicated %q: active_hosts item: host_id:%q, allocated:%q, state:%q, capacity:%d (%d)", w.name, hostId, host.AllocationTime, host.State, getHostCapacity(&host), w.instancesPerHost) + if log.GetVerbosity() == 1 { + log.Debugf("AWS: 
dedicated %q: Amount of active hosts in pool: %d", w.name, len(w.activeHosts)) + for hostID, host := range w.activeHosts { + log.Debugf("AWS: dedicated %q: active_hosts item: host_id:%q, allocated:%q, state:%q, capacity:%d (%d)", w.name, hostID, host.AllocationTime, host.State, getHostCapacity(&host), w.instancesPerHost) } } diff --git a/lib/drivers/aws/driver.go b/lib/drivers/aws/driver.go index 7f3428c..a69ac26 100644 --- a/lib/drivers/aws/driver.go +++ b/lib/drivers/aws/driver.go @@ -35,14 +35,16 @@ import ( "github.com/adobe/aquarium-fish/lib/util" ) -// Implements drivers.ResourceDriverFactory interface +// Factory implements drivers.ResourceDriverFactory interface type Factory struct{} -func (f *Factory) Name() string { +// Name shows name of the driver factory +func (*Factory) Name() string { return "aws" } -func (f *Factory) NewResourceDriver() drivers.ResourceDriver { +// NewResourceDriver creates new resource driver +func (*Factory) NewResourceDriver() drivers.ResourceDriver { return &Driver{} } @@ -50,7 +52,7 @@ func init() { drivers.FactoryList = append(drivers.FactoryList, &Factory{}) } -// Implements drivers.ResourceDriver interface +// Driver implements drivers.ResourceDriver interface type Driver struct { cfg Config // Contains the available tasks of the driver @@ -64,14 +66,17 @@ type Driver struct { dedicatedPools map[string]*dedicatedPoolWorker } -func (d *Driver) Name() string { +// Name returns name of the driver +func (*Driver) Name() string { return "aws" } -func (d *Driver) IsRemote() bool { +// IsRemote needed to detect the out-of-node resources managed by this driver +func (*Driver) IsRemote() bool { return true } +// Prepare initializes the driver func (d *Driver) Prepare(config []byte) error { if err := d.cfg.Apply(config); err != nil { return err @@ -112,7 +117,8 @@ func (d *Driver) Prepare(config []byte) error { return nil } -func (d *Driver) ValidateDefinition(def types.LabelDefinition) error { +// ValidateDefinition checks 
LabelDefinition is ok +func (*Driver) ValidateDefinition(def types.LabelDefinition) error { var opts Options if err := opts.Apply(def.Options); err != nil { return err @@ -126,8 +132,8 @@ func (d *Driver) ValidateDefinition(def types.LabelDefinition) error { return nil } -// Allow Fish to ask the driver about it's capacity (free slots) of a specific definition -func (d *Driver) AvailableCapacity(nodeUsage types.Resources, def types.LabelDefinition) int64 { +// AvailableCapacity allows Fish to ask the driver about it's capacity (free slots) of a specific definition +func (d *Driver) AvailableCapacity(_ /*nodeUsage*/ types.Resources, def types.LabelDefinition) int64 { var instCount int64 var opts Options @@ -223,7 +229,7 @@ func (d *Driver) AvailableCapacity(nodeUsage types.Resources, def types.LabelDef } // Checking the current usage of CPU's of this project and subtracting it from quota value - cpuUsage, err := d.getProjectCpuUsage(connEc2, instTypes) + cpuUsage, err := d.getProjectCPUUsage(connEc2, instTypes) if err != nil { return -1 } @@ -236,7 +242,7 @@ func (d *Driver) AvailableCapacity(nodeUsage types.Resources, def types.LabelDef // Make sure we have enough IP's in the selected VPC or subnet var ipCount int64 var err error - if _, ipCount, err = d.getSubnetId(connEc2, def.Resources.Network); err != nil { + if _, ipCount, err = d.getSubnetID(connEc2, def.Resources.Network); err != nil { log.Error("AWS: Error during requesting subnet:", err) return -1 } @@ -250,12 +256,10 @@ func (d *Driver) AvailableCapacity(nodeUsage types.Resources, def types.LabelDef return instCount } -/** - * Allocate Instance with provided image - * - * It selects the AMI and run instance - * Uses metadata to fill EC2 instance userdata - */ +// Allocate Instance with provided image +// +// It selects the AMI and run instance +// Uses metadata to fill EC2 instance userdata func (d *Driver) Allocate(def types.LabelDefinition, metadata map[string]any) (*types.Resource, error) { // Generate 
fish name buf := crypt.RandBytes(6) @@ -271,13 +275,13 @@ func (d *Driver) Allocate(def types.LabelDefinition, metadata map[string]any) (* // Checking the VPC exists or use default one vmNetwork := def.Resources.Network var err error - if vmNetwork, _, err = d.getSubnetId(conn, vmNetwork); err != nil { + if vmNetwork, _, err = d.getSubnetID(conn, vmNetwork); err != nil { return nil, fmt.Errorf("AWS: %s: Unable to get subnet: %v", iName, err) } log.Infof("AWS: %s: Selected subnet: %q", iName, vmNetwork) vmImage := opts.Image - if vmImage, err = d.getImageId(conn, vmImage); err != nil { + if vmImage, err = d.getImageID(conn, vmImage); err != nil { return nil, fmt.Errorf("AWS: %s: Unable to get image: %v", iName, err) } log.Infof("AWS: %s: Selected image: %q", iName, vmImage) @@ -302,19 +306,20 @@ func (d *Driver) Allocate(def types.LabelDefinition, metadata map[string]any) (* if opts.Pool != "" { // Let's reserve or allocate the host for the new instance - if p, ok := d.dedicatedPools[opts.Pool]; ok { - hostId := p.ReserveAllocateHost(opts.InstanceType) - if hostId == "" { - return nil, fmt.Errorf("AWS: %s: Unable to reserve host in dedicated pool %q", iName, opts.Pool) - } - input.Placement = &ec2types.Placement{ - Tenancy: ec2types.TenancyHost, - HostId: aws.String(hostId), - } - log.Infof("AWS: %s: Utilizing pool %q host: %s", iName, opts.Pool, hostId) - } else { + p, ok := d.dedicatedPools[opts.Pool] + if !ok { return nil, fmt.Errorf("AWS: %s: Unable to locate the dedicated pool: %s", iName, opts.Pool) } + + hostID := p.ReserveAllocateHost(opts.InstanceType) + if hostID == "" { + return nil, fmt.Errorf("AWS: %s: Unable to reserve host in dedicated pool %q", iName, opts.Pool) + } + input.Placement = &ec2types.Placement{ + Tenancy: ec2types.TenancyHost, + HostId: aws.String(hostID), + } + log.Infof("AWS: %s: Utilizing pool %q host: %s", iName, opts.Pool, hostID) } else if awsInstTypeAny(opts.InstanceType, "mac") { // For mac machines only dedicated hosts are 
working, so set the tenancy input.Placement = &ec2types.Placement{ @@ -333,7 +338,7 @@ func (d *Driver) Allocate(def types.LabelDefinition, metadata map[string]any) (* if opts.SecurityGroup != "" { vmSecgroup := opts.SecurityGroup - if vmSecgroup, err = d.getSecGroupId(conn, vmSecgroup); err != nil { + if vmSecgroup, err = d.getSecGroupID(conn, vmSecgroup); err != nil { return nil, fmt.Errorf("AWS: %s: Unable to get security group: %v", iName, err) } log.Infof("AWS: %s: Selected security group: %q", iName, vmSecgroup) @@ -404,7 +409,7 @@ func (d *Driver) Allocate(def types.LabelDefinition, metadata map[string]any) (* if disk.Clone != "" { // Use snapshot as the disk source vmSnapshot := disk.Clone - if vmSnapshot, err = d.getSnapshotId(conn, vmSnapshot); err != nil { + if vmSnapshot, err = d.getSnapshotID(conn, vmSnapshot); err != nil { return nil, fmt.Errorf("AWS: %s: Unable to get snapshot: %v", iName, err) } log.Infof("AWS: %s: Selected snapshot: %q", iName, vmSnapshot) @@ -414,12 +419,12 @@ func (d *Driver) Allocate(def types.LabelDefinition, metadata map[string]any) (* mapping.Ebs.VolumeSize = aws.Int32(int32(disk.Size)) if opts.EncryptKey != "" { mapping.Ebs.Encrypted = aws.Bool(true) - keyId, err := d.getKeyId(opts.EncryptKey) + keyID, err := d.getKeyID(opts.EncryptKey) if err != nil { return nil, fmt.Errorf("AWS: %s: Unable to get encrypt key from KMS: %v", iName, err) } - log.Infof("AWS: %s: Selected encryption key: %q for disk: %q", iName, keyId, name) - mapping.Ebs.KmsKeyId = aws.String(keyId) + log.Infof("AWS: %s: Selected encryption key: %q for disk: %q", iName, keyID, name) + mapping.Ebs.KmsKeyId = aws.String(keyID) } } input.BlockDeviceMappings = append(input.BlockDeviceMappings, mapping) @@ -517,6 +522,7 @@ func (d *Driver) Allocate(def types.LabelDefinition, metadata map[string]any) (* return res, log.Errorf("AWS: %s: Unable to locate the instance IP: %q", iName, aws.ToString(inst.InstanceId)) } +// Status shows status of the resource func (d 
*Driver) Status(res *types.Resource) (string, error) { if res == nil || res.Identifier == "" { return "", fmt.Errorf("AWS: Invalid resource: %v", res) @@ -532,6 +538,7 @@ func (d *Driver) Status(res *types.Resource) (string, error) { return drivers.StatusNone, nil } +// GetTask returns task struct by name func (d *Driver) GetTask(name, options string) drivers.ResourceDriverTask { // Look for the specified task name var t drivers.ResourceDriverTask @@ -552,6 +559,7 @@ func (d *Driver) GetTask(name, options string) drivers.ResourceDriverTask { return t } +// Deallocate the resource func (d *Driver) Deallocate(res *types.Resource) error { if res == nil || res.Identifier == "" { return fmt.Errorf("AWS: Invalid resource: %v", res) diff --git a/lib/drivers/aws/options.go b/lib/drivers/aws/options.go index 49bc773..9b77b74 100644 --- a/lib/drivers/aws/options.go +++ b/lib/drivers/aws/options.go @@ -20,14 +20,15 @@ import ( "github.com/adobe/aquarium-fish/lib/util" ) -/** - * Options example: - * image: ami-abcdef123456 - * instance_type: c6a.4xlarge - * security_group: sg-abcdef123456 - * tags: - * somekey: somevalue - */ +// Options for label definition +// +// Example: +// +// image: ami-abcdef123456 +// instance_type: c6a.4xlarge +// security_group: sg-abcdef123456 +// tags: +// somekey: somevalue type Options struct { Image string `json:"image"` // ID/Name of the image you want to use (name that contains * is usually a bad idea for reproducibility) InstanceType string `json:"instance_type"` // Type of the instance from aws available list @@ -44,7 +45,8 @@ type Options struct { TaskImageEncryptKey string `json:"task_image_encrypt_key"` // KMS Key ID or Alias in format "alias/" if need to re-encrypt the newly created AMI snapshots } -func (o *Options) Apply(options util.UnparsedJson) error { +// Apply takes json and applies it to the options structure +func (o *Options) Apply(options util.UnparsedJSON) error { if err := json.Unmarshal([]byte(options), o); err != nil { 
return log.Error("AWS: Unable to apply the driver options", err) } @@ -52,6 +54,7 @@ func (o *Options) Apply(options util.UnparsedJson) error { return o.Validate() } +// Validate makes sure the options have the required defaults & that the required fields are set func (o *Options) Validate() error { // Check image if o.Image == "" { diff --git a/lib/drivers/aws/task_image.go b/lib/drivers/aws/task_image.go index cbedf52..9c90662 100644 --- a/lib/drivers/aws/task_image.go +++ b/lib/drivers/aws/task_image.go @@ -26,8 +26,9 @@ import ( "github.com/adobe/aquarium-fish/lib/openapi/types" ) +// TaskImage stores the task data type TaskImage struct { - driver *Driver `json:"-"` + driver *Driver *types.ApplicationTask `json:"-"` // Info about the requested task *types.LabelDefinition `json:"-"` // Info about the used label definition @@ -36,22 +37,25 @@ type TaskImage struct { Full bool `json:"full"` // Make full (all disks including connected disks), or just the root OS disk image } -func (t *TaskImage) Name() string { +// Name returns name of the task +func (*TaskImage) Name() string { return "image" } +// Clone makes a copy of the initial task to execute func (t *TaskImage) Clone() drivers.ResourceDriverTask { n := *t return &n } +// SetInfo defines information of the environment func (t *TaskImage) SetInfo(task *types.ApplicationTask, def *types.LabelDefinition, res *types.Resource) { t.ApplicationTask = task t.LabelDefinition = def t.Resource = res } -// Image could be executed during ALLOCATED & DEALLOCATE ApplicationStatus +// Execute - Image task could be executed during ALLOCATED & DEALLOCATE ApplicationStatus func (t *TaskImage) Execute() (result []byte, err error) { if t.ApplicationTask == nil { return []byte(`{"error":"internal: invalid application task"}`), log.Error("AWS: TaskImage: Invalid application task:", t.ApplicationTask) @@ -214,27 +218,27 @@ func (t *TaskImage) Execute() (result []byte, err error) { return []byte(`{"error":"internal: no image was 
created from instance"}`), log.Errorf("AWS: No image was created from instance %s", t.Resource.Identifier) } - imageId := aws.ToString(resp.ImageId) - log.Infof("AWS: TaskImage %s: Created image %q with id %q...", t.ApplicationTask.UID, aws.ToString(input.Name), imageId) + imageID := aws.ToString(resp.ImageId) + log.Infof("AWS: TaskImage %s: Created image %q with id %q...", t.ApplicationTask.UID, aws.ToString(input.Name), imageID) // Wait for the image to be completed, otherwise if we will start a copy - it will fail... - log.Infof("AWS: TaskImage %s: Wait for image %s %q availability...", t.ApplicationTask.UID, imageId, aws.ToString(input.Name)) + log.Infof("AWS: TaskImage %s: Wait for image %s %q availability...", t.ApplicationTask.UID, imageID, aws.ToString(input.Name)) sw := ec2.NewImageAvailableWaiter(conn) maxWait := time.Duration(t.driver.cfg.ImageCreateWait) waitInput := ec2.DescribeImagesInput{ ImageIds: []string{ - imageId, + imageID, }, } if err = sw.Wait(context.TODO(), &waitInput, maxWait); err != nil { // Need to make sure tmp image will be removed, while target image could stay and complete if opts.TaskImageEncryptKey != "" { - log.Debugf("AWS: TaskImage %s: Cleanup the temp image %q", t.ApplicationTask.UID, imageId) - if err := t.driver.deleteImage(conn, imageId); err != nil { + log.Debugf("AWS: TaskImage %s: Cleanup the temp image %q", t.ApplicationTask.UID, imageID) + if err := t.driver.deleteImage(conn, imageID); err != nil { log.Errorf("AWS: TaskImage %s: Unable to cleanup the temp image %s: %v", t.ApplicationTask.UID, t.Resource.Identifier, err) } } - return []byte(`{"error":"internal: timeout on await for the image availability"}`), log.Error("AWS: Error during wait for the image availability:", imageId, aws.ToString(input.Name), err) + return []byte(`{"error":"internal: timeout on await for the image availability"}`), log.Error("AWS: Error during wait for the image availability:", imageID, aws.ToString(input.Name), err) } // If 
TaskImageEncryptKey is set - we need to copy the image with enabled encryption and delete the temp one @@ -271,15 +275,15 @@ func (t *TaskImage) Execute() (result []byte, err error) { } // Delete the temp image & associated snapshots - log.Debugf("AWS: TaskImage %s: Deleting the temp image %q", t.ApplicationTask.UID, imageId) - if err = t.driver.deleteImage(conn, imageId); err != nil { - return []byte(`{"error":"internal: unable to delete the tmp image"}`), log.Errorf("AWS: Unable to delete the temp image %s: %v", imageId, err) + log.Debugf("AWS: TaskImage %s: Deleting the temp image %q", t.ApplicationTask.UID, imageID) + if err = t.driver.deleteImage(conn, imageID); err != nil { + return []byte(`{"error":"internal: unable to delete the tmp image"}`), log.Errorf("AWS: Unable to delete the temp image %s: %v", imageID, err) } - imageId = aws.ToString(resp.ImageId) + imageID = aws.ToString(resp.ImageId) } - log.Infof("AWS: Created image for the instance %s: %s %q", t.Resource.Identifier, imageId, imageName) + log.Infof("AWS: Created image for the instance %s: %s %q", t.Resource.Identifier, imageID, imageName) - return json.Marshal(map[string]string{"image": imageId, "image_name": imageName}) + return json.Marshal(map[string]string{"image": imageID, "image_name": imageName}) } diff --git a/lib/drivers/aws/task_snapshot.go b/lib/drivers/aws/task_snapshot.go index c3592fe..5cbbc12 100644 --- a/lib/drivers/aws/task_snapshot.go +++ b/lib/drivers/aws/task_snapshot.go @@ -27,8 +27,9 @@ import ( "github.com/adobe/aquarium-fish/lib/openapi/types" ) +// TaskSnapshot stores the task data type TaskSnapshot struct { - driver *Driver `json:"-"` + driver *Driver *types.ApplicationTask `json:"-"` // Info about the requested task *types.LabelDefinition `json:"-"` // Info about the used label definition @@ -37,22 +38,25 @@ type TaskSnapshot struct { Full bool `json:"full"` // Make full (all disks including OS image), or just the additional disks snapshot } -func (t *TaskSnapshot) 
Name() string { +// Name returns name of the task +func (*TaskSnapshot) Name() string { return "snapshot" } +// Clone makes a copy of the initial task to execute func (t *TaskSnapshot) Clone() drivers.ResourceDriverTask { n := *t return &n } +// SetInfo defines information of the environment func (t *TaskSnapshot) SetInfo(task *types.ApplicationTask, def *types.LabelDefinition, res *types.Resource) { t.ApplicationTask = task t.LabelDefinition = def t.Resource = res } -// Snapshot could be executed during ALLOCATED & DEALLOCATE ApplicationStatus +// Execute - Snapshot task could be executed during ALLOCATED & DEALLOCATE ApplicationStatus func (t *TaskSnapshot) Execute() (result []byte, err error) { if t.ApplicationTask == nil { return []byte(`{"error":"internal: invalid application task"}`), log.Error("AWS: Invalid application task:", t.ApplicationTask) diff --git a/lib/drivers/aws/util.go b/lib/drivers/aws/util.go index 114d947..4db5e13 100644 --- a/lib/drivers/aws/util.go +++ b/lib/drivers/aws/util.go @@ -30,7 +30,7 @@ import ( func (d *Driver) newEC2Conn() *ec2.Client { return ec2.NewFromConfig(aws.Config{ Region: d.cfg.Region, - Credentials: aws.CredentialsProviderFunc(func(ctx context.Context) (aws.Credentials, error) { + Credentials: aws.CredentialsProviderFunc(func(_ /*ctx*/ context.Context) (aws.Credentials, error) { return aws.Credentials{ AccessKeyID: d.cfg.KeyID, SecretAccessKey: d.cfg.SecretKey, @@ -48,7 +48,7 @@ func (d *Driver) newEC2Conn() *ec2.Client { func (d *Driver) newKMSConn() *kms.Client { return kms.NewFromConfig(aws.Config{ Region: d.cfg.Region, - Credentials: aws.CredentialsProviderFunc(func(ctx context.Context) (aws.Credentials, error) { + Credentials: aws.CredentialsProviderFunc(func(_ /*ctx*/ context.Context) (aws.Credentials, error) { return aws.Credentials{ AccessKeyID: d.cfg.KeyID, SecretAccessKey: d.cfg.SecretKey, @@ -66,7 +66,7 @@ func (d *Driver) newKMSConn() *kms.Client { func (d *Driver) newServiceQuotasConn() 
*servicequotas.Client { return servicequotas.NewFromConfig(aws.Config{ Region: d.cfg.Region, - Credentials: aws.CredentialsProviderFunc(func(ctx context.Context) (aws.Credentials, error) { + Credentials: aws.CredentialsProviderFunc(func(_ /*ctx*/ context.Context) (aws.Credentials, error) { return aws.Credentials{ AccessKeyID: d.cfg.KeyID, SecretAccessKey: d.cfg.SecretKey, @@ -84,7 +84,7 @@ func (d *Driver) newServiceQuotasConn() *servicequotas.Client { // Will verify and return subnet id // In case vpc id was provided - will chose the subnet with less used ip's // Returns the found subnet_id, total count of available ip's and error if some -func (d *Driver) getSubnetId(conn *ec2.Client, idTag string) (string, int64, error) { +func (d *Driver) getSubnetID(conn *ec2.Client, idTag string) (string, int64, error) { filter := types.Filter{} // Check if the tag is provided (":") @@ -187,10 +187,10 @@ func (d *Driver) getSubnetId(conn *ec2.Client, idTag string) (string, int64, err if strings.HasPrefix(idTag, "vpc-") { // Chose the less used subnet in VPC - var currCount int32 = 0 - var totalIpCount int64 = 0 + var currCount int32 + var totalIPCount int64 for _, subnet := range resp.Subnets { - totalIpCount += int64(aws.ToInt32(subnet.AvailableIpAddressCount)) + totalIPCount += int64(aws.ToInt32(subnet.AvailableIpAddressCount)) if currCount < aws.ToInt32(subnet.AvailableIpAddressCount) { idTag = aws.ToString(subnet.SubnetId) currCount = aws.ToInt32(subnet.AvailableIpAddressCount) @@ -199,7 +199,7 @@ func (d *Driver) getSubnetId(conn *ec2.Client, idTag string) (string, int64, err if currCount == 0 { return "", 0, fmt.Errorf("AWS: Subnets have no available IP addresses") } - return idTag, totalIpCount, nil + return idTag, totalIPCount, nil } else if idTag != aws.ToString(resp.Subnets[0].SubnetId) { return "", 0, fmt.Errorf("AWS: Unable to verify the subnet id: %q != %q", idTag, aws.ToString(resp.Subnets[0].SubnetId)) } @@ -208,7 +208,7 @@ func (d *Driver) getSubnetId(conn 
*ec2.Client, idTag string) (string, int64, err } // Will verify and return image id -func (d *Driver) getImageId(conn *ec2.Client, idName string) (string, error) { +func (d *Driver) getImageID(conn *ec2.Client, idName string) (string, error) { if strings.HasPrefix(idName, "ami-") { return idName, nil } @@ -237,7 +237,7 @@ func (d *Driver) getImageId(conn *ec2.Client, idName string) (string, error) { idName = aws.ToString(resp.Images[0].ImageId) // Getting the images and find the latest one - var foundId string + var foundID string var foundTime time.Time for p.HasMorePages() { resp, err := p.NextPage(context.TODO()) @@ -255,21 +255,21 @@ func (d *Driver) getImageId(conn *ec2.Client, idName string) (string, error) { continue } if foundTime.Before(t) { - foundId = aws.ToString(r.ImageId) + foundID = aws.ToString(r.ImageId) foundTime = t } } } - if foundId == "" { + if foundID == "" { return "", fmt.Errorf("AWS: Unable to locate snapshot with specified tag: %s", idName) } - return foundId, nil + return foundID, nil } // Types are used to calculate some not that obvious values -func (d *Driver) getTypes(conn *ec2.Client, instanceTypes []string) (map[string]types.InstanceTypeInfo, error) { +func (*Driver) getTypes(conn *ec2.Client, instanceTypes []string) (map[string]types.InstanceTypeInfo, error) { out := make(map[string]types.InstanceTypeInfo) req := ec2.DescribeInstanceTypesInput{} @@ -299,7 +299,7 @@ func (d *Driver) getTypes(conn *ec2.Client, instanceTypes []string) (map[string] } // Will return latest available image for the instance type -func (d *Driver) getImageIdByType(conn *ec2.Client, instanceType string) (string, error) { +func (d *Driver) getImageIDByType(conn *ec2.Client, instanceType string) (string, error) { log.Debug("AWS: Looking an image for type:", instanceType) instTypes, err := d.getTypes(conn, []string{instanceType}) @@ -356,18 +356,18 @@ func (d *Driver) getImageIdByType(conn *ec2.Client, instanceType string) (string continue } - imageId := 
aws.ToString(resp.Images[0].ImageId) + imageID := aws.ToString(resp.Images[0].ImageId) - log.Debugf("AWS: Found image for specified type %q (arch %s): %s", instanceType, typeArch, imageId) + log.Debugf("AWS: Found image for specified type %q (arch %s): %s", instanceType, typeArch, imageID) - return imageId, nil + return imageID, nil } return "", fmt.Errorf("AWS: Unable to locate image for type %q (arch %s) till year %d", instanceType, typeArch, imagesTill.Year()+1) } // Will verify and return security group id -func (d *Driver) getSecGroupId(conn *ec2.Client, idName string) (string, error) { +func (d *Driver) getSecGroupID(conn *ec2.Client, idName string) (string, error) { if strings.HasPrefix(idName, "sg-") { return idName, nil } @@ -400,7 +400,7 @@ func (d *Driver) getSecGroupId(conn *ec2.Client, idName string) (string, error) } // Will verify and return latest snapshot id -func (d *Driver) getSnapshotId(conn *ec2.Client, idTag string) (string, error) { +func (d *Driver) getSnapshotID(conn *ec2.Client, idTag string) (string, error) { if strings.HasPrefix(idTag, "snap-") { return idTag, nil } @@ -428,7 +428,7 @@ func (d *Driver) getSnapshotId(conn *ec2.Client, idTag string) (string, error) { p := ec2.NewDescribeSnapshotsPaginator(conn, &req) // Getting the snapshots to find the latest one - foundId := "" + foundID := "" var foundTime time.Time for p.HasMorePages() { resp, err := p.NextPage(context.TODO()) @@ -440,20 +440,20 @@ func (d *Driver) getSnapshotId(conn *ec2.Client, idTag string) (string, error) { } for _, r := range resp.Snapshots { if foundTime.Before(aws.ToTime(r.StartTime)) { - foundId = aws.ToString(r.SnapshotId) + foundID = aws.ToString(r.SnapshotId) foundTime = aws.ToTime(r.StartTime) } } } - if foundId == "" { + if foundID == "" { return "", fmt.Errorf("AWS: Unable to locate snapshot with specified tag: %s", idTag) } - return foundId, nil + return foundID, nil } -func (d *Driver) getProjectCpuUsage(conn *ec2.Client, instTypes []string) (int64, 
error) { +func (*Driver) getProjectCPUUsage(conn *ec2.Client, instTypes []string) (int64, error) { var cpuCount int64 // Here is no way to use some filter, so we're getting them all and after that @@ -488,12 +488,12 @@ func (d *Driver) getProjectCpuUsage(conn *ec2.Client, instTypes []string) (int64 return cpuCount, nil } -func (d *Driver) getInstance(conn *ec2.Client, instId string) (*types.Instance, error) { +func (*Driver) getInstance(conn *ec2.Client, instID string) (*types.Instance, error) { input := ec2.DescribeInstancesInput{ Filters: []types.Filter{ { Name: aws.String("instance-id"), - Values: []string{instId}, + Values: []string{instID}, }, }, } @@ -509,7 +509,7 @@ func (d *Driver) getInstance(conn *ec2.Client, instId string) (*types.Instance, } // Will get the kms key id based on alias if it's specified -func (d *Driver) getKeyId(idAlias string) (string, error) { +func (d *Driver) getKeyID(idAlias string) (string, error) { if !strings.HasPrefix(idAlias, "alias/") { return idAlias, nil } @@ -605,20 +605,20 @@ func awsInstTypeAny(val string, options ...string) bool { * Creates and immediately terminates instance to trigger scrubbing process on mac hosts. * Used during mac dedicated hosts pool management to deal with 24h limit to save on budget. 
*/ -func (d *Driver) triggerHostScrubbing(hostId, instanceType string) (err error) { +func (d *Driver) triggerHostScrubbing(hostID, instanceType string) (err error) { conn := d.newEC2Conn() // Just need an image, which we could find by looking at the host instance type var vmImage string - if vmImage, err = d.getImageIdByType(conn, instanceType); err != nil { - return fmt.Errorf("AWS: scrubbing %s: Unable to find image: %v", hostId, err) + if vmImage, err = d.getImageIDByType(conn, instanceType); err != nil { + return fmt.Errorf("AWS: scrubbing %s: Unable to find image: %v", hostID, err) } - log.Infof("AWS: scrubbing %s: Selected image: %q", hostId, vmImage) + log.Infof("AWS: scrubbing %s: Selected image: %q", hostID, vmImage) // Prepare Instance request information placement := types.Placement{ Tenancy: types.TenancyHost, - HostId: aws.String(hostId), + HostId: aws.String(hostID), } input := ec2.RunInstancesInput{ ImageId: aws.String(vmImage), @@ -634,10 +634,10 @@ func (d *Driver) triggerHostScrubbing(hostId, instanceType string) (err error) { // Run the instance result, err := conn.RunInstances(context.TODO(), &input) if err != nil { - return log.Errorf("AWS: scrubbing %s: Unable to run instance: %v", hostId, err) + return log.Errorf("AWS: scrubbing %s: Unable to run instance: %v", hostID, err) } - instId := aws.ToString(result.Instances[0].InstanceId) + instID := aws.ToString(result.Instances[0].InstanceId) // Don't need to wait - let's terminate the instance right away // We need to terminate no matter wat - so repeating until it will be terminated, otherwise @@ -645,18 +645,18 @@ func (d *Driver) triggerHostScrubbing(hostId, instanceType string) (err error) { for { input := ec2.TerminateInstancesInput{ - InstanceIds: []string{instId}, + InstanceIds: []string{instID}, } result, err := conn.TerminateInstances(context.TODO(), &input) if err != nil || len(result.TerminatingInstances) < 1 { - log.Errorf("AWS: scrubbing %s: Error during termianting the instance %s: 
%s", hostId, instId, err) + log.Errorf("AWS: scrubbing %s: Error during terminating the instance %s: %s", hostID, instID, err) time.Sleep(10 * time.Second) continue } - if aws.ToString(result.TerminatingInstances[0].InstanceId) != instId { - log.Errorf("AWS: scrubbing %s: Wrong instance id result %s during terminating of %s", hostId, aws.ToString(result.TerminatingInstances[0].InstanceId), instId) + if aws.ToString(result.TerminatingInstances[0].InstanceId) != instID { + log.Errorf("AWS: scrubbing %s: Wrong instance id result %s during terminating of %s", hostID, aws.ToString(result.TerminatingInstances[0].InstanceId), instID) time.Sleep(10 * time.Second) continue } @@ -664,7 +664,7 @@ func (d *Driver) triggerHostScrubbing(hostId, instanceType string) (err error) { break } - log.Infof("AWS: scrubbing %s: Scrubbing process was triggered", hostId) + log.Infof("AWS: scrubbing %s: Scrubbing process was triggered", hostID) return nil } diff --git a/lib/drivers/docker/config.go b/lib/drivers/docker/config.go index da4e29d..45b3d4f 100644 --- a/lib/drivers/docker/config.go +++ b/lib/drivers/docker/config.go @@ -10,6 +10,7 @@ * governing permissions and limitations under the License. */ +// Package docker implements driver package docker import ( @@ -21,6 +22,7 @@ import ( "github.com/adobe/aquarium-fish/lib/log" ) +// Config - node driver configuration type Config struct { DockerPath string `json:"docker_path"` // '/Applications/Docker.app/Contents/Resources/bin/docker' @@ -35,22 +37,23 @@ type Config struct { // for disk caching) // * Positive (>0) value could also be available (but check it in your docker dist in advance) // Please be careful here - noone wants the container to fail allocation because of that...
- CpuAlter int `json:"cpu_alter"` // 0 do nothing, <0 reduces number available CPUs, >0 increases it (dangerous) - RamAlter int `json:"ram_alter"` // 0 do nothing, <0 reduces amount of available RAM (GB), >0 increases it (dangerous) + CPUAlter int `json:"cpu_alter"` // 0 do nothing, <0 reduces number available CPUs, >0 increases it (dangerous) + RAMAlter int `json:"ram_alter"` // 0 do nothing, <0 reduces amount of available RAM (GB), >0 increases it (dangerous) // Overbook options allows tenants to reuse the resources // It will be used only when overbook is allowed by the tenants. It works by just adding those // amounts to the existing total before checking availability. For example if you have 16CPU - // and want to run 2 tenants with requirement of 14 CPUs each - you can put 12 in CpuOverbook - + // and want to run 2 tenants with requirement of 14 CPUs each - you can put 12 in CPUOverbook - // to have virtually 28 CPUs. 3rd will not be running because 2 tenants will eat all 28 virtual // CPUs. Same applies to the RamOverbook. 
- CpuOverbook uint `json:"cpu_overbook"` // How much CPUs could be reused by multiple tenants - RamOverbook uint `json:"ram_overbook"` // How much RAM (GB) could be reused by multiple tenants + CPUOverbook uint `json:"cpu_overbook"` // How much CPUs could be reused by multiple tenants + RAMOverbook uint `json:"ram_overbook"` // How much RAM (GB) could be reused by multiple tenants DownloadUser string `json:"download_user"` // The user will be used in download operations DownloadPassword string `json:"download_password"` // The password will be used in download operations } +// Apply takes json and applies it to the config structure func (c *Config) Apply(config []byte) error { if len(config) > 0 { if err := json.Unmarshal(config, c); err != nil { @@ -60,6 +63,7 @@ func (c *Config) Apply(config []byte) error { return nil } +// Validate makes sure the config have the required defaults & that the required fields are set func (c *Config) Validate() (err error) { // Check that values of the config is filled at least with defaults if c.DockerPath == "" { @@ -88,9 +92,8 @@ func (c *Config) Validate() (err error) { if err := os.MkdirAll(c.ImagesPath, 0o750); err != nil { return err } - if err := os.MkdirAll(c.WorkspacePath, 0o750); err != nil { - return err - } - return nil + err = os.MkdirAll(c.WorkspacePath, 0o750) + + return err } diff --git a/lib/drivers/docker/driver.go b/lib/drivers/docker/driver.go index 23b173c..d37400e 100644 --- a/lib/drivers/docker/driver.go +++ b/lib/drivers/docker/driver.go @@ -30,14 +30,16 @@ import ( "github.com/adobe/aquarium-fish/lib/openapi/types" ) -// Implements drivers.ResourceDriverFactory interface +// Factory implements drivers.ResourceDriverFactory interface type Factory struct{} -func (f *Factory) Name() string { +// Name shows name of the driver factory +func (*Factory) Name() string { return "docker" } -func (f *Factory) NewResourceDriver() drivers.ResourceDriver { +// NewResourceDriver creates new resource driver +func 
(*Factory) NewResourceDriver() drivers.ResourceDriver { return &Driver{} } @@ -45,27 +47,30 @@ func init() { drivers.FactoryList = append(drivers.FactoryList, &Factory{}) } -// Implements drivers.ResourceDriver interface +// Driver implements drivers.ResourceDriver interface type Driver struct { cfg Config // Contains the available tasks of the driver tasksList []drivers.ResourceDriverTask - totalCpu uint // In logical threads - totalRam uint // In RAM megabytes + totalCPU uint // In logical threads + totalRAM uint // In RAM megabytes dockerUsageMutex sync.Mutex dockerUsage types.Resources // Used when the docker is remote } -func (d *Driver) Name() string { +// Name returns name of the driver +func (*Driver) Name() string { return "docker" } +// IsRemote needed to detect the out-of-node resources managed by this driver func (d *Driver) IsRemote() bool { return d.cfg.IsRemote } +// Prepare initializes the driver func (d *Driver) Prepare(config []byte) error { if err := d.cfg.Apply(config); err != nil { return err @@ -85,16 +90,16 @@ func (d *Driver) Prepare(config []byte) error { if len(cpuMem) < 2 { return fmt.Errorf("Docker: Not enough info values in return: %q", cpuMem) } - parsedCpu, err := strconv.ParseUint(cpuMem[0], 10, 64) + parsedCPU, err := strconv.ParseUint(cpuMem[0], 10, 64) if err != nil { return fmt.Errorf("Docker: Unable to parse CPU uint: %v (%q)", err, cpuMem[0]) } - d.totalCpu = uint(parsedCpu / 1000000000) // Originally in NCPU - parsedRam, err := strconv.ParseUint(cpuMem[1], 10, 64) + d.totalCPU = uint(parsedCPU / 1000000000) // Originally in NCPU + parsedRAM, err := strconv.ParseUint(cpuMem[1], 10, 64) if err != nil { return fmt.Errorf("Docker: Unable to parse RAM uint: %v (%q)", err, cpuMem[1]) } - d.totalRam = uint(parsedRam / 1073741824) // Get in GB + d.totalRAM = uint(parsedRAM / 1073741824) // Get in GB // Collect the current state of docker containers for validation (for example not controlled // containers) purposes - it will be 
actively used if docker driver is remote @@ -107,7 +112,8 @@ func (d *Driver) Prepare(config []byte) error { return nil } -func (d *Driver) ValidateDefinition(def types.LabelDefinition) error { +// ValidateDefinition checks LabelDefinition is ok +func (*Driver) ValidateDefinition(def types.LabelDefinition) error { // Check resources if err := def.Resources.Validate([]string{"dir", "hfs+", "exfat", "fat32"}, true); err != nil { return log.Error("Docker: Resources validation failed:", err) @@ -118,7 +124,7 @@ func (d *Driver) ValidateDefinition(def types.LabelDefinition) error { return opts.Apply(def.Options) } -// Allow Fish to ask the driver about it's capacity (free slots) of a specific definition +// AvailableCapacity allows Fish to ask the driver about it's capacity (free slots) of a specific definition func (d *Driver) AvailableCapacity(nodeUsage types.Resources, req types.LabelDefinition) int64 { var outCount int64 @@ -129,13 +135,13 @@ func (d *Driver) AvailableCapacity(nodeUsage types.Resources, req types.LabelDef d.dockerUsageMutex.Unlock() } - availCpu, availRam := d.getAvailResources() + availCPU, availRAM := d.getAvailResources() // Check if the node has the required resources - otherwise we can't run it anyhow - if req.Resources.Cpu > availCpu { + if req.Resources.Cpu > availCPU { return 0 } - if req.Resources.Ram > availRam { + if req.Resources.Ram > availRAM { return 0 } // TODO: Check disk requirements @@ -152,16 +158,16 @@ func (d *Driver) AvailableCapacity(nodeUsage types.Resources, req types.LabelDef if nodeUsage.Multitenancy && req.Resources.Multitenancy { // Ok we can run more tenants, let's calculate how much if nodeUsage.CpuOverbook && req.Resources.CpuOverbook { - availCpu += d.cfg.CpuOverbook + availCPU += d.cfg.CPUOverbook } if nodeUsage.RamOverbook && req.Resources.RamOverbook { - availRam += d.cfg.RamOverbook + availRAM += d.cfg.RAMOverbook } } // Calculate how much of those definitions we could run - outCount = int64((availCpu - 
nodeUsage.Cpu) / req.Resources.Cpu) - ramCount := int64((availRam - nodeUsage.Ram) / req.Resources.Ram) + outCount = int64((availCPU - nodeUsage.Cpu) / req.Resources.Cpu) + ramCount := int64((availRAM - nodeUsage.Ram) / req.Resources.Ram) if outCount > ramCount { outCount = ramCount } @@ -170,12 +176,10 @@ func (d *Driver) AvailableCapacity(nodeUsage types.Resources, req types.LabelDef return outCount } -/** - * Allocate container out of the images - * - * It automatically download the required images, unpack them and runs the container. - * Using metadata to create env file and pass it to the container. - */ +// Allocate container out of the images +// +// It automatically download the required images, unpack them and runs the container. +// Using metadata to create env file and pass it to the container. func (d *Driver) Allocate(def types.LabelDefinition, metadata map[string]any) (*types.Resource, error) { if d.cfg.IsRemote { // It's remote so let's use docker_usage to store modificators properly @@ -261,16 +265,18 @@ func (d *Driver) Allocate(def types.LabelDefinition, metadata map[string]any) (* return &types.Resource{Identifier: cName, HwAddr: cHwaddr}, nil } +// Status shows status of the resource func (d *Driver) Status(res *types.Resource) (string, error) { if res == nil || res.Identifier == "" { return "", fmt.Errorf("Docker: Invalid resource: %v", res) } - if len(d.getAllocatedContainerId(res.Identifier)) > 0 { + if len(d.getAllocatedContainerID(res.Identifier)) > 0 { return drivers.StatusAllocated, nil } return drivers.StatusNone, nil } +// GetTask returns task struct by name func (d *Driver) GetTask(name, options string) drivers.ResourceDriverTask { // Look for the specified task name var t drivers.ResourceDriverTask @@ -291,6 +297,7 @@ func (d *Driver) GetTask(name, options string) drivers.ResourceDriverTask { return t } +// Deallocate the resource func (d *Driver) Deallocate(res *types.Resource) error { if res == nil || res.Identifier == "" { return 
fmt.Errorf("Docker: Invalid resource: %v", res) @@ -301,14 +308,14 @@ func (d *Driver) Deallocate(res *types.Resource) error { defer d.dockerUsageMutex.Unlock() } cName := d.getContainerName(res.Identifier) - cId := d.getAllocatedContainerId(res.Identifier) - if len(cId) == 0 { + cID := d.getAllocatedContainerID(res.Identifier) + if len(cID) == 0 { return log.Error("Docker: Unable to find container with identifier:", res.Identifier) } // Getting the mounted volumes stdout, _, err := runAndLog(5*time.Second, d.cfg.DockerPath, "inspect", - "--format", "{{ range .Mounts }}{{ println .Source }}{{ end }}", cId, + "--format", "{{ range .Mounts }}{{ println .Source }}{{ end }}", cID, ) if err != nil { return log.Error("Docker: Unable to inspect the container:", cName, err) @@ -317,7 +324,7 @@ func (d *Driver) Deallocate(res *types.Resource) error { if d.cfg.IsRemote { // Get the container CPU/RAM to subtract from the docker_usage - res, err := d.getContainersResources([]string{cId}) + res, err := d.getContainersResources([]string{cID}) if err != nil { return log.Error("Docker: Unable to collect the container resources:", cName, err) } @@ -326,11 +333,11 @@ func (d *Driver) Deallocate(res *types.Resource) error { } // Stop the container - if _, _, err := runAndLogRetry(3, 10*time.Second, d.cfg.DockerPath, "stop", cId); err != nil { + if _, _, err := runAndLogRetry(3, 10*time.Second, d.cfg.DockerPath, "stop", cID); err != nil { return log.Error("Docker: Unable to stop the container:", cName, err) } // Remove the container - if _, _, err := runAndLog(5*time.Second, d.cfg.DockerPath, "rm", cId); err != nil { + if _, _, err := runAndLog(5*time.Second, d.cfg.DockerPath, "rm", cID); err != nil { return log.Error("Docker: Unable to remove the container:", cName, err) } diff --git a/lib/drivers/docker/options.go b/lib/drivers/docker/options.go index 8d6fcc9..274fab0 100644 --- a/lib/drivers/docker/options.go +++ b/lib/drivers/docker/options.go @@ -20,21 +20,23 @@ import ( 
"github.com/adobe/aquarium-fish/lib/util" ) -/** - * Options example: - * images: - * - url: https://artifact-storage/aquarium/image/docker/ubuntu2004/ubuntu2004-VERSION.tar.xz - * sum: sha256:1234567890abcdef1234567890abcdef1 - * - url: https://artifact-storage/aquarium/image/docker/ubuntu2004-python3/ubuntu2004-python3-VERSION.tar.xz - * sum: sha256:1234567890abcdef1234567890abcdef2 - * - url: https://artifact-storage/aquarium/image/docker/ubuntu2004-python3-ci/ubuntu2004-python3-ci-VERSION.tar.xz - * sum: sha256:1234567890abcdef1234567890abcdef3 - */ +// Options for label definition +// +// Example: +// +// images: +// - url: https://artifact-storage/aquarium/image/docker/ubuntu2004/ubuntu2004-VERSION.tar.xz +// sum: sha256:1234567890abcdef1234567890abcdef1 +// - url: https://artifact-storage/aquarium/image/docker/ubuntu2004-python3/ubuntu2004-python3-VERSION.tar.xz +// sum: sha256:1234567890abcdef1234567890abcdef2 +// - url: https://artifact-storage/aquarium/image/docker/ubuntu2004-python3-ci/ubuntu2004-python3-ci-VERSION.tar.xz +// sum: sha256:1234567890abcdef1234567890abcdef3 type Options struct { Images []drivers.Image `json:"images"` // List of image dependencies, last one is running one } -func (o *Options) Apply(options util.UnparsedJson) error { +// Apply takes json and applies it to the options structure +func (o *Options) Apply(options util.UnparsedJSON) error { if err := json.Unmarshal([]byte(options), o); err != nil { return log.Error("Docker: Unable to apply the driver options:", err) } @@ -42,6 +44,7 @@ func (o *Options) Apply(options util.UnparsedJson) error { return o.Validate() } +// Validate makes sure the options have the required defaults & that the required fields are set func (o *Options) Validate() error { // Check images var imgErr error diff --git a/lib/drivers/docker/util.go b/lib/drivers/docker/util.go index 2856fb2..694089e 100644 --- a/lib/drivers/docker/util.go +++ b/lib/drivers/docker/util.go @@ -30,12 +30,12 @@ import ( 
"github.com/adobe/aquarium-fish/lib/openapi/types" ) -func (d *Driver) getContainersResources(containerIds []string) (types.Resources, error) { +func (d *Driver) getContainersResources(containerIDs []string) (types.Resources, error) { var out types.Resources // Getting current running containers info - will return ",\n..." for each one dockerArgs := []string{"inspect", "--format", "{{ .HostConfig.NanoCpus }},{{ .HostConfig.Memory }}"} - dockerArgs = append(dockerArgs, containerIds...) + dockerArgs = append(dockerArgs, containerIDs...) stdout, _, err := runAndLog(5*time.Second, d.cfg.DockerPath, dockerArgs...) if err != nil { return out, fmt.Errorf("Docker: Unable to inspect the containers to get used resources: %v", err) @@ -47,19 +47,19 @@ func (d *Driver) getContainersResources(containerIds []string) (types.Resources, if len(cpuMem) < 2 { return out, fmt.Errorf("Docker: Not enough info values in return: %q", resList) } - resCpu, err := strconv.ParseUint(cpuMem[0], 10, 64) + resCPU, err := strconv.ParseUint(cpuMem[0], 10, 64) if err != nil { return out, fmt.Errorf("Docker: Unable to parse CPU uint: %v (%q)", err, cpuMem[0]) } - resRam, err := strconv.ParseUint(cpuMem[1], 10, 64) + resRAM, err := strconv.ParseUint(cpuMem[1], 10, 64) if err != nil { return out, fmt.Errorf("Docker: Unable to parse RAM uint: %v (%q)", err, cpuMem[1]) } - if resCpu == 0 || resRam == 0 { - return out, fmt.Errorf("Docker: The container is non-Fish controlled zero-cpu/ram ones: %q", containerIds) + if resCPU == 0 || resRAM == 0 { + return out, fmt.Errorf("Docker: The container is non-Fish controlled zero-cpu/ram ones: %q", containerIDs) } - out.Cpu += uint(resCpu / 1000000000) // Originallly in NCPU - out.Ram += uint(resRam / 1073741824) // Get in GB + out.Cpu += uint(resCPU / 1000000000) // Originally in NCPU + out.Ram += uint(resRAM / 1073741824) // Get in GB // TODO: Add disks too here } @@ -98,10 +98,10 @@ func (d *Driver) getInitialUsage() (types.Resources, error) { // There is more
than one container is running so multitenancy is true out.Multitenancy = true } - if out.Cpu > d.totalCpu { + if out.Cpu > d.totalCPU { out.CpuOverbook = true } - if out.Ram > d.totalRam { + if out.Ram > d.totalRAM { out.RamOverbook = true } @@ -109,24 +109,24 @@ func (d *Driver) getInitialUsage() (types.Resources, error) { } // Collects the available resource with alteration -func (d *Driver) getAvailResources() (availCpu, availRam uint) { - if d.cfg.CpuAlter < 0 { - availCpu = d.totalCpu - uint(-d.cfg.CpuAlter) +func (d *Driver) getAvailResources() (availCPU, availRAM uint) { + if d.cfg.CPUAlter < 0 { + availCPU = d.totalCPU - uint(-d.cfg.CPUAlter) } else { - availCpu = d.totalCpu + uint(d.cfg.CpuAlter) + availCPU = d.totalCPU + uint(d.cfg.CPUAlter) } - if d.cfg.RamAlter < 0 { - availRam = d.totalRam - uint(-d.cfg.RamAlter) + if d.cfg.RAMAlter < 0 { + availRAM = d.totalRAM - uint(-d.cfg.RAMAlter) } else { - availRam = d.totalRam + uint(d.cfg.RamAlter) + availRAM = d.totalRAM + uint(d.cfg.RAMAlter) } return } // Returns the standardized container name -func (d *Driver) getContainerName(hwaddr string) string { +func (*Driver) getContainerName(hwaddr string) string { return fmt.Sprintf("fish-%s", strings.ReplaceAll(hwaddr, ":", "")) } @@ -135,7 +135,7 @@ func (d *Driver) loadImages(opts *Options) (string, error) { // Download the images and unpack them var wg sync.WaitGroup for _, image := range opts.Images { - log.Info("Docker: Loading the required image:", image.Name, image.Version, image.Url) + log.Info("Docker: Loading the required image:", image.Name, image.Version, image.URL) // Running the background routine to download, unpack and process the image // Success will be checked later by existence of the image in local docker registry @@ -143,7 +143,7 @@ func (d *Driver) loadImages(opts *Options) (string, error) { go func(image drivers.Image) { defer wg.Done() if err := image.DownloadUnpack(d.cfg.ImagesPath, d.cfg.DownloadUser, d.cfg.DownloadPassword); err != 
nil { - log.Error("Docker: Unable to download and unpack the image:", image.Name, image.Url, err) + log.Error("Docker: Unable to download and unpack the image:", image.Name, image.URL, err) } }(image) } @@ -252,7 +252,7 @@ func (d *Driver) loadImages(opts *Options) (string, error) { } // Receives the container ID out of the container name -func (d *Driver) getAllocatedContainerId(cName string) string { +func (d *Driver) getAllocatedContainerID(cName string) string { // Probably it's better to store the current list in the memory stdout, _, err := runAndLog(5*time.Second, d.cfg.DockerPath, "ps", "-a", "-q", "--filter", "name="+cName) if err != nil { diff --git a/lib/drivers/driver.go b/lib/drivers/driver.go index 1100da9..596d548 100644 --- a/lib/drivers/driver.go +++ b/lib/drivers/driver.go @@ -10,20 +10,23 @@ * governing permissions and limitations under the License. */ +// Package drivers implements interface for each driver (resource provider) package drivers import ( "github.com/adobe/aquarium-fish/lib/openapi/types" ) +// Status of the driver returned by Status() const ( StatusNone = "NONE" StatusAllocated = "ALLOCATED" ) +// FactoryList is a list of available drivers factories var FactoryList []ResourceDriverFactory -// Factory allows to generate new instances of the drivers +// ResourceDriverFactory allows to generate new instances of the drivers type ResourceDriverFactory interface { // Name of the driver Name() string @@ -32,6 +35,7 @@ type ResourceDriverFactory interface { NewResourceDriver() ResourceDriver } +// ResourceDriver interface of the functions that connects Fish to each driver type ResourceDriver interface { // Name of the driver Name() string diff --git a/lib/drivers/image.go b/lib/drivers/image.go index 505af43..511dfd0 100644 --- a/lib/drivers/image.go +++ b/lib/drivers/image.go @@ -37,7 +37,7 @@ import ( // Image definition type Image struct { - Url string `json:"url"` // Address of the remote image to download it + URL string `json:"url"` 
// Address of the remote image to download it Sum string `json:"sum"` // Optional checksum of the image in format ":" Name string `json:"name"` // Optional name of the image, if not set will use a part of the Url file name prior to last minus ("-") or ext @@ -46,20 +46,21 @@ type Image struct { Tag string `json:"tag"` // Optional identifier used by drivers to make sure the images will be processed properly } +// Validate makes sure the image spec is good enough func (i *Image) Validate() error { // Check url is defined - if i.Url == "" { + if i.URL == "" { return fmt.Errorf("Image: Url is not provided") } // Check url schema is supported - if !(strings.HasPrefix(i.Url, "http://") || strings.HasPrefix(i.Url, "https://")) { - return fmt.Errorf("Image: Url schema is not supported: %q", i.Url) + if !(strings.HasPrefix(i.URL, "http://") || strings.HasPrefix(i.URL, "https://")) { + return fmt.Errorf("Image: Url schema is not supported: %q", i.URL) } // Fill name out of image url if i.Name == "" { - i.Name = path.Base(i.Url) + i.Name = path.Base(i.URL) minusLoc := strings.LastIndexByte(i.Name, '-') if minusLoc != -1 { // Use the part from beginning to last minus ('-') - useful to separate version part @@ -77,7 +78,7 @@ func (i *Image) Validate() error { // Fill version out of image url if i.Version == "" { - i.Version = path.Base(i.Url) + i.Version = path.Base(i.URL) minusLoc := strings.LastIndexByte(i.Version, '-') if minusLoc != -1 { // Use the part from the last minus ('-') to the end @@ -112,13 +113,13 @@ func (i *Image) Validate() error { return nil } -// Stream function to download and unpack image archive without using a storage file to make it as +// DownloadUnpack is a stream function to download and unpack image archive without using a storage file to make it as // quick as possible. // -> out_dir - is the directory where the image will be placed. 
It will be unpacked to out_dir/Name-Version/ // -> user, password - credentials for HTTP Basic auth func (i *Image) DownloadUnpack(outDir, user, password string) error { imgPath := filepath.Join(outDir, i.Name+"-"+i.Version) - log.Debug("Image: Downloading & Unpacking image:", i.Url, imgPath) + log.Debug("Image: Downloading & Unpacking image:", i.URL, imgPath) lockPath := imgPath + ".lock" // Wait for another process to download and unpack the archive @@ -140,20 +141,20 @@ func (i *Image) DownloadUnpack(outDir, user, password string) error { defer os.Remove(lockPath) client := &http.Client{} - req, _ := http.NewRequestWithContext(context.TODO(), http.MethodGet, i.Url, nil) + req, _ := http.NewRequestWithContext(context.TODO(), http.MethodGet, i.URL, nil) if user != "" && password != "" { req.SetBasicAuth(user, password) } resp, err := client.Do(req) if err != nil { os.RemoveAll(imgPath) - return fmt.Errorf("Image: Unable to request url %q: %v", i.Url, err) + return fmt.Errorf("Image: Unable to request url %q: %v", i.URL, err) } defer resp.Body.Close() if resp.StatusCode != http.StatusOK { os.RemoveAll(imgPath) - return fmt.Errorf("Image: Unable to download file %q: %s", i.Url, resp.Status) + return fmt.Errorf("Image: Unable to download file %q: %s", i.URL, resp.Status) } // Printing the download progress @@ -197,7 +198,7 @@ func (i *Image) DownloadUnpack(outDir, user, password string) error { if remoteSum != algoSum[1] { os.RemoveAll(imgPath) return fmt.Errorf("Image: The remote checksum (from header X-Checksum-%s) doesn't equal the desired one: %q != %q for %q", - strings.Title(algoSum[0]), remoteSum, algoSum[1], i.Url) //nolint:staticcheck // SA1019 Strictly ASCII here + strings.Title(algoSum[0]), remoteSum, algoSum[1], i.URL) //nolint:staticcheck // SA1019 Strictly ASCII here } } } @@ -253,7 +254,6 @@ func (i *Image) DownloadUnpack(outDir, user, password string) error { os.RemoveAll(imgPath) return fmt.Errorf("Image: Unable to open file %q for unpack: %v", 
target, err) } - defer w.Close() // TODO: Add in-stream sha256 calculation for each file to verify against .sha256 data for { @@ -264,8 +264,10 @@ func (i *Image) DownloadUnpack(outDir, user, password string) error { break } os.RemoveAll(imgPath) + w.Close() return fmt.Errorf("Image: Unable to unpack content to file %q: %v", target, err) } + w.Close() } } @@ -279,7 +281,7 @@ func (i *Image) DownloadUnpack(outDir, user, password string) error { if calculatedSum != algoSum[1] { os.RemoveAll(imgPath) return fmt.Errorf("Image: The calculated checksum doesn't equal the desired one: %q != %q for %q", - calculatedSum, algoSum[1], i.Url) + calculatedSum, algoSum[1], i.URL) } } diff --git a/lib/drivers/image_test.go b/lib/drivers/image_test.go index 3b9c267..0def01d 100644 --- a/lib/drivers/image_test.go +++ b/lib/drivers/image_test.go @@ -30,59 +30,59 @@ var server *httptest.Server func Test_image_validate(t *testing.T) { t.Run("good_url", func(t *testing.T) { image := Image{ - Url: "https://example.org/aquarium/image/test/test-image-ci/test-image-ci-20230210.190425_ff1cd1cf.tar.xz", + URL: "https://example.org/aquarium/image/test/test-image-ci/test-image-ci-20230210.190425_ff1cd1cf.tar.xz", } if err := image.Validate(); err != nil { - t.Fatalf(`image.Validate() = %q, unexpected error: %v`, image.Url, err) + t.Fatalf(`image.Validate() = %q, unexpected error: %v`, image.URL, err) } if image.Name != "test-image-ci" { - t.Fatalf(`image.Validate() = %q, name is not equal the expected one: %v`, image.Url, image.Name) + t.Fatalf(`image.Validate() = %q, name is not equal the expected one: %v`, image.URL, image.Name) } if image.Version != "20230210.190425_ff1cd1cf" { - t.Fatalf(`image.Validate() = %q, version is not equal the expected one: %v`, image.Url, image.Version) + t.Fatalf(`image.Validate() = %q, version is not equal the expected one: %v`, image.URL, image.Version) } }) t.Run("bad_url_empty", func(t *testing.T) { image := Image{ - Url: "", + URL: "", } if err := 
image.Validate(); err == nil || err.Error() != "Image: Url is not provided" { - t.Fatalf(`image.Validate() = %q, URL error expected, but incorrect was returned: %v`, image.Url, err) + t.Fatalf(`image.Validate() = %q, URL error expected, but incorrect was returned: %v`, image.URL, err) } }) t.Run("bad_url_schema", func(t *testing.T) { image := Image{ - Url: "ftp://tst", + URL: "ftp://tst", } if err := image.Validate(); err == nil || err.Error() != `Image: Url schema is not supported: "ftp://tst"` { - t.Fatalf(`image.Validate() = %q, URL error expected, but incorrect was returned: %v`, image.Url, err) + t.Fatalf(`image.Validate() = %q, URL error expected, but incorrect was returned: %v`, image.URL, err) } }) t.Run("good_sum", func(t *testing.T) { image := Image{ - Url: "https://example.org/aquarium/image/test/test-image-ci/test-image-ci-20230210.190425_ff1cd1cf.tar.xz", + URL: "https://example.org/aquarium/image/test/test-image-ci/test-image-ci-20230210.190425_ff1cd1cf.tar.xz", Sum: "sha256:0123456789abcdef0123456789abcdef0123456789abcdef", } if err := image.Validate(); err != nil { - t.Fatalf(`image.Validate() = %q, unexpected error: %v`, image.Url, err) + t.Fatalf(`image.Validate() = %q, unexpected error: %v`, image.URL, err) } }) t.Run("badsum_algo", func(t *testing.T) { image := Image{ - Url: "https://example.org/aquarium/image/test/test-image-ci/test-image-ci-20230210.190425_ff1cd1cf.tar.xz", + URL: "https://example.org/aquarium/image/test/test-image-ci/test-image-ci-20230210.190425_ff1cd1cf.tar.xz", Sum: "incorrect:0123456789abcdef0123456789abcdef0123456789abcdef", } if err := image.Validate(); err == nil || err.Error() != `Image: Checksum with not supported algorithm (md5, sha1, sha256, sha512): "incorrect"` { - t.Fatalf(`image.Validate() = %q, URL error expected, but incorrect was returned: %v`, image.Url, err) + t.Fatalf(`image.Validate() = %q, URL error expected, but incorrect was returned: %v`, image.URL, err) } }) } @@ -114,72 +114,72 @@ func 
Test_image_downloadunpack(t *testing.T) { t.Run("good", func(t *testing.T) { image := Image{ - Url: server.URL + testImageCiPath, + URL: server.URL + testImageCiPath, Sum: "sha256:" + testImageCiSha256, } // Make sure image is ok if err := image.Validate(); err != nil { - t.Fatalf(`image.Validate() = %q, unexpected error: %v`, image.Url, err) + t.Fatalf(`image.Validate() = %q, unexpected error: %v`, image.URL, err) } // Download/unpack into temp directory if err := image.DownloadUnpack(t.TempDir(), "user", "password"); err != nil { - t.Fatalf(`image.DownloadUnpack() = %q, unexpected error: %v`, image.Url, err) + t.Fatalf(`image.DownloadUnpack() = %q, unexpected error: %v`, image.URL, err) } }) t.Run("bad_url", func(t *testing.T) { image := Image{ - Url: server.URL + "/not/existing/artifact-version.tar.xz", + URL: server.URL + "/not/existing/artifact-version.tar.xz", Sum: "sha256:" + testImageCiSha256, } // Make sure image is ok if err := image.Validate(); err != nil { - t.Fatalf(`image.Validate() = %q, unexpected error: %v`, image.Url, err) + t.Fatalf(`image.Validate() = %q, unexpected error: %v`, image.URL, err) } // Download/unpack into temp directory err := image.DownloadUnpack(t.TempDir(), "user", "password") if err == nil || err.Error() != `Image: Unable to download file "`+server.URL+`/not/existing/artifact-version.tar.xz": 404 Not Found` { - t.Fatalf(`image.DownloadUnpack() = %q, error expected, but incorrect was returned: %v`, image.Url, err) + t.Fatalf(`image.DownloadUnpack() = %q, error expected, but incorrect was returned: %v`, image.URL, err) } }) t.Run("bad_header_checksum", func(t *testing.T) { image := Image{ - Url: server.URL + testImageCiPath, + URL: server.URL + testImageCiPath, Sum: "sha256:0123456789abcdef", } // Make sure image is ok if err := image.Validate(); err != nil { - t.Fatalf(`image.Validate() = %q, unexpected error: %v`, image.Url, err) + t.Fatalf(`image.Validate() = %q, unexpected error: %v`, image.URL, err) } // Download/unpack into 
temp directory err := image.DownloadUnpack(t.TempDir(), "user", "password") if err == nil || err.Error() != `Image: The remote checksum (from header X-Checksum-Sha256) doesn't equal the desired one: "`+testImageCiSha256+`" != "0123456789abcdef" for "`+server.URL+testImageCiPath+`"` { - t.Fatalf(`image.DownloadUnpack() = %q, error expected, but incorrect was returned: %v`, image.Url, err) + t.Fatalf(`image.DownloadUnpack() = %q, error expected, but incorrect was returned: %v`, image.URL, err) } }) t.Run("bad_calculated_checksum", func(t *testing.T) { image := Image{ - Url: server.URL + testImageCiPath + "?nosumheader", + URL: server.URL + testImageCiPath + "?nosumheader", Sum: "sha256:0123456789abcdef", } // Make sure image is ok if err := image.Validate(); err != nil { - t.Fatalf(`image.Validate() = %q, unexpected error: %v`, image.Url, err) + t.Fatalf(`image.Validate() = %q, unexpected error: %v`, image.URL, err) } // Download/unpack into temp directory err := image.DownloadUnpack(t.TempDir(), "user", "password") if err == nil || err.Error() != `Image: The calculated checksum doesn't equal the desired one: "`+testImageCiSha256+`" != "0123456789abcdef" for "`+server.URL+testImageCiPath+`?nosumheader"` { - t.Fatalf(`image.DownloadUnpack() = %q, error expected, but incorrect was returned: %v`, image.Url, err) + t.Fatalf(`image.DownloadUnpack() = %q, error expected, but incorrect was returned: %v`, image.URL, err) } }) } diff --git a/lib/drivers/native/config.go b/lib/drivers/native/config.go index 145a848..b508341 100644 --- a/lib/drivers/native/config.go +++ b/lib/drivers/native/config.go @@ -10,6 +10,7 @@ * governing permissions and limitations under the License. 
*/ +// Package native implements driver package native import ( @@ -25,6 +26,7 @@ import ( "github.com/adobe/aquarium-fish/lib/log" ) +// Config - node driver configuration type Config struct { //TODO: Users []string `json:"users"` // List of precreated OS user names in format "user[:password]" to run the workload @@ -52,22 +54,23 @@ type Config struct { // for disk caching) // * Positive (>0) is also available, but you're going to put more load on the scheduler // Please be careful here - noone wants the workload to fail allocation because of that... - CpuAlter int `json:"cpu_alter"` // 0 do nothing, <0 reduces number available CPUs, >0 increases it (dangerous) - RamAlter int `json:"ram_alter"` // 0 do nothing, <0 reduces amount of available RAM (GB), >0 increases it (dangerous) + CPUAlter int `json:"cpu_alter"` // 0 do nothing, <0 reduces number available CPUs, >0 increases it (dangerous) + RAMAlter int `json:"ram_alter"` // 0 do nothing, <0 reduces amount of available RAM (GB), >0 increases it (dangerous) // Overbook options allows tenants to reuse the resources // It will be used only when overbook is allowed by the tenants. It works by just adding those // amounts to the existing total before checking availability. For example if you have 16CPU - // and want to run 2 tenants with requirement of 14 CPUs each - you can put 12 in CpuOverbook - + // and want to run 2 tenants with requirement of 14 CPUs each - you can put 12 in CPUOverbook - // to have virtually 28 CPUs. 3rd will not be running because 2 tenants will eat all 28 virtual // CPUs. Same applies to the RamOverbook. 
- CpuOverbook uint `json:"cpu_overbook"` // How much CPUs could be reused by multiple tenants - RamOverbook uint `json:"ram_overbook"` // How much RAM (GB) could be reused by multiple tenants + CPUOverbook uint `json:"cpu_overbook"` // How much CPUs could be reused by multiple tenants + RAMOverbook uint `json:"ram_overbook"` // How much RAM (GB) could be reused by multiple tenants DownloadUser string `json:"download_user"` // The user will be used to auth in download operations DownloadPassword string `json:"download_password"` // The password will be used to auth in download operations } +// Apply takes json and applies it to the config structure func (c *Config) Apply(config []byte) (err error) { if len(config) > 0 { if err = json.Unmarshal(config, c); err != nil { @@ -95,13 +98,13 @@ func (c *Config) Apply(config []byte) (err error) { if err = os.MkdirAll(c.ImagesPath, 0o750); err != nil { return err } - if err = os.MkdirAll(c.WorkspacePath, 0o750); err != nil { - return err - } - return nil + err = os.MkdirAll(c.WorkspacePath, 0o750) + + return err } +// Validate makes sure the config have the required defaults & that the required fields are set func (c *Config) Validate() (err error) { // Sudo is used to run commands from superuser and execute a number of // administrative actions to create/delete the user and cleanup @@ -112,10 +115,8 @@ func (c *Config) Validate() (err error) { } else { if info, err := os.Stat(c.SudoPath); os.IsNotExist(err) { return fmt.Errorf("Native: Unable to locate `sudo` path: %s, %s", c.SudoPath, err) - } else { - if info.Mode()&0o111 == 0 { - return fmt.Errorf("Native: `sudo` binary is not executable: %s", c.SudoPath) - } + } else if info.Mode()&0o111 == 0 { + return fmt.Errorf("Native: `sudo` binary is not executable: %s", c.SudoPath) } } @@ -127,10 +128,8 @@ func (c *Config) Validate() (err error) { } else { if info, err := os.Stat(c.SuPath); os.IsNotExist(err) { return fmt.Errorf("Native: Unable to locate `su` path: %s, %s", 
c.SuPath, err) - } else { - if info.Mode()&0o111 == 0 { - return fmt.Errorf("Native: `su` binary is not executable: %s", c.SuPath) - } + } else if info.Mode()&0o111 == 0 { + return fmt.Errorf("Native: `su` binary is not executable: %s", c.SuPath) } } @@ -142,10 +141,8 @@ func (c *Config) Validate() (err error) { } else { if info, err := os.Stat(c.ShPath); os.IsNotExist(err) { return fmt.Errorf("Native: Unable to locate `sh` path: %s, %s", c.ShPath, err) - } else { - if info.Mode()&0o111 == 0 { - return fmt.Errorf("Native: `sh` binary is not executable: %s", c.ShPath) - } + } else if info.Mode()&0o111 == 0 { + return fmt.Errorf("Native: `sh` binary is not executable: %s", c.ShPath) } } // Tar used to unpack the images @@ -156,10 +153,8 @@ func (c *Config) Validate() (err error) { } else { if info, err := os.Stat(c.TarPath); os.IsNotExist(err) { return fmt.Errorf("Native: Unable to locate `tar` path: %s, %s", c.TarPath, err) - } else { - if info.Mode()&0o111 == 0 { - return fmt.Errorf("Native: `tar` binary is not executable: %s", c.TarPath) - } + } else if info.Mode()&0o111 == 0 { + return fmt.Errorf("Native: `tar` binary is not executable: %s", c.TarPath) } } // Mount allows to look at the mounted volumes @@ -170,10 +165,8 @@ func (c *Config) Validate() (err error) { } else { if info, err := os.Stat(c.MountPath); os.IsNotExist(err) { return fmt.Errorf("Native: Unable to locate `mount` path: %s, %s", c.MountPath, err) - } else { - if info.Mode()&0o111 == 0 { - return fmt.Errorf("Native: `mount` binary is not executable: %s", c.MountPath) - } + } else if info.Mode()&0o111 == 0 { + return fmt.Errorf("Native: `mount` binary is not executable: %s", c.MountPath) } } // Chown needed to properly set ownership for the unprevileged user on available resources @@ -184,10 +177,8 @@ func (c *Config) Validate() (err error) { } else { if info, err := os.Stat(c.ChownPath); os.IsNotExist(err) { return fmt.Errorf("Native: Unable to locate `chown` path: %s, %s", c.ChownPath, err) - } 
else { - if info.Mode()&0o111 == 0 { - return fmt.Errorf("Native: `chown` binary is not executable: %s", c.ChownPath) - } + } else if info.Mode()&0o111 == 0 { + return fmt.Errorf("Native: `chown` binary is not executable: %s", c.ChownPath) } } // Chmod needed to set additional read access for the unprevileged user on env metadata file @@ -198,10 +189,8 @@ func (c *Config) Validate() (err error) { } else { if info, err := os.Stat(c.ChmodPath); os.IsNotExist(err) { return fmt.Errorf("Native: Unable to locate `chmod` path: %s, %s", c.ChmodPath, err) - } else { - if info.Mode()&0o111 == 0 { - return fmt.Errorf("Native: `chmod` binary is not executable: %s", c.ChmodPath) - } + } else if info.Mode()&0o111 == 0 { + return fmt.Errorf("Native: `chmod` binary is not executable: %s", c.ChmodPath) } } // Killall is running to stop all the unprevileged user processes during deallocation @@ -212,10 +201,8 @@ func (c *Config) Validate() (err error) { } else { if info, err := os.Stat(c.KillallPath); os.IsNotExist(err) { return fmt.Errorf("Native: Unable to locate `killall` path: %s, %s", c.KillallPath, err) - } else { - if info.Mode()&0o111 == 0 { - return fmt.Errorf("Native: `killall` binary is not executable: %s", c.KillallPath) - } + } else if info.Mode()&0o111 == 0 { + return fmt.Errorf("Native: `killall` binary is not executable: %s", c.KillallPath) } } // Rm allows to clean up the leftowers after the execution @@ -226,10 +213,8 @@ func (c *Config) Validate() (err error) { } else { if info, err := os.Stat(c.RmPath); os.IsNotExist(err) { return fmt.Errorf("Native: Unable to locate `rm` path: %s, %s", c.RmPath, err) - } else { - if info.Mode()&0o111 == 0 { - return fmt.Errorf("Native: `rm` binary is not executable: %s", c.RmPath) - } + } else if info.Mode()&0o111 == 0 { + return fmt.Errorf("Native: `rm` binary is not executable: %s", c.RmPath) } } @@ -242,10 +227,8 @@ func (c *Config) Validate() (err error) { } else { if info, err := os.Stat(c.DsclPath); os.IsNotExist(err) { 
return fmt.Errorf("Native: Unable to locate macos `dscl` path: %s, %s", c.DsclPath, err) - } else { - if info.Mode()&0o111 == 0 { - return fmt.Errorf("Native: macos `dscl` binary is not executable: %s", c.DsclPath) - } + } else if info.Mode()&0o111 == 0 { + return fmt.Errorf("Native: macos `dscl` binary is not executable: %s", c.DsclPath) } } // Hdiutil allows to create disk images and mount them to restrict user by disk space @@ -256,10 +239,8 @@ func (c *Config) Validate() (err error) { } else { if info, err := os.Stat(c.HdiutilPath); os.IsNotExist(err) { return fmt.Errorf("Native: Unable to locate macos `hdiutil` path: %s, %s", c.HdiutilPath, err) - } else { - if info.Mode()&0o111 == 0 { - return fmt.Errorf("Native: macos `hdiutil` binary is not executable: %s", c.HdiutilPath) - } + } else if info.Mode()&0o111 == 0 { + return fmt.Errorf("Native: macos `hdiutil` binary is not executable: %s", c.HdiutilPath) } } // Mdutil allows to disable the indexing for mounted volume @@ -270,10 +251,8 @@ func (c *Config) Validate() (err error) { } else { if info, err := os.Stat(c.MdutilPath); os.IsNotExist(err) { return fmt.Errorf("Native: Unable to locate macos `mdutil` path: %s, %s", c.MdutilPath, err) - } else { - if info.Mode()&0o111 == 0 { - return fmt.Errorf("Native: macos `mdutil` binary is not executable: %s", c.MdutilPath) - } + } else if info.Mode()&0o111 == 0 { + return fmt.Errorf("Native: macos `mdutil` binary is not executable: %s", c.MdutilPath) } } // Createhomedir creates unprevileged user home directory and fulfills with default subdirs @@ -284,10 +263,8 @@ func (c *Config) Validate() (err error) { } else { if info, err := os.Stat(c.CreatehomedirPath); os.IsNotExist(err) { return fmt.Errorf("Native: Unable to locate macos `createhomedir` path: %s, %s", c.CreatehomedirPath, err) - } else { - if info.Mode()&0o111 == 0 { - return fmt.Errorf("Native: macos `createhomedir` binary is not executable: %s", c.CreatehomedirPath) - } + } else if info.Mode()&0o111 == 0 { 
+ return fmt.Errorf("Native: macos `createhomedir` binary is not executable: %s", c.CreatehomedirPath) } } @@ -341,8 +318,8 @@ func (c *Config) Validate() (err error) { return err } - if c.CpuAlter < 0 && cpuStat <= -c.CpuAlter { - return log.Errorf("Native: |CpuAlter| can't be more or equal the available Host CPUs: |%d| > %d", c.CpuAlter, cpuStat) + if c.CPUAlter < 0 && cpuStat <= -c.CPUAlter { + return log.Errorf("Native: |CpuAlter| can't be more or equal the available Host CPUs: |%d| > %d", c.CPUAlter, cpuStat) } memStat, err := mem.VirtualMemory() @@ -351,8 +328,8 @@ func (c *Config) Validate() (err error) { } ramStat := memStat.Total / 1073741824 // Getting GB from Bytes - if c.RamAlter < 0 && int(ramStat) <= -c.RamAlter { - return log.Errorf("Native: |RamAlter| can't be more or equal the available Host RAM: |%d| > %d", c.RamAlter, ramStat) + if c.RAMAlter < 0 && int(ramStat) <= -c.RAMAlter { + return log.Errorf("Native: |RamAlter| can't be more or equal the available Host RAM: |%d| > %d", c.RAMAlter, ramStat) } return nil diff --git a/lib/drivers/native/driver.go b/lib/drivers/native/driver.go index 7d0ca27..75dc8cb 100644 --- a/lib/drivers/native/driver.go +++ b/lib/drivers/native/driver.go @@ -26,14 +26,16 @@ import ( "github.com/adobe/aquarium-fish/lib/openapi/types" ) -// Implements drivers.ResourceDriverFactory interface +// Factory implements drivers.ResourceDriverFactory interface type Factory struct{} -func (f *Factory) Name() string { +// Name shows name of the driver factory +func (*Factory) Name() string { return "native" } -func (f *Factory) NewResourceDriver() drivers.ResourceDriver { +// NewResourceDriver creates new resource driver +func (*Factory) NewResourceDriver() drivers.ResourceDriver { return &Driver{} } @@ -41,29 +43,32 @@ func init() { drivers.FactoryList = append(drivers.FactoryList, &Factory{}) } -// Implements drivers.ResourceDriver interface +// Driver implements drivers.ResourceDriver interface type Driver struct { cfg Config // 
Contains the available tasks of the driver tasksList []drivers.ResourceDriverTask - totalCpu uint // In logical threads - totalRam uint // In RAM GB + totalCPU uint // In logical threads + totalRAM uint // In RAM GB } -// Is used to provide some data to the entry/metadata values which could contain templates +// EnvData is used to provide some data to the entry/metadata values which could contain templates type EnvData struct { Disks map[string]string // Map with disk_name = mount_path } -func (d *Driver) Name() string { +// Name returns name of the driver +func (*Driver) Name() string { return "native" } -func (d *Driver) IsRemote() bool { +// IsRemote needed to detect the out-of-node resources managed by this driver +func (*Driver) IsRemote() bool { return false } +// Prepare initializes the driver func (d *Driver) Prepare(config []byte) error { if err := d.cfg.Apply(config); err != nil { return err @@ -77,20 +82,21 @@ func (d *Driver) Prepare(config []byte) error { if err != nil { return err } - d.totalCpu = uint(cpuStat) + d.totalCPU = uint(cpuStat) memStat, err := mem.VirtualMemory() if err != nil { return err } - d.totalRam = uint(memStat.Total / 1073741824) // Getting GB from Bytes + d.totalRAM = uint(memStat.Total / 1073741824) // Getting GB from Bytes // TODO: Cleanup the image directory in case the images are not good return nil } -func (d *Driver) ValidateDefinition(def types.LabelDefinition) error { +// ValidateDefinition checks LabelDefinition is ok +func (*Driver) ValidateDefinition(def types.LabelDefinition) error { // Check options var opts Options if err := opts.Apply(def.Options); err != nil { @@ -115,7 +121,7 @@ func (d *Driver) ValidateDefinition(def types.LabelDefinition) error { return nil } -// Allow Fish to ask the driver about it's capacity (free slots) of a specific definition +// AvailableCapacity allows Fish to ask the driver about it's capacity (free slots) of a specific definition func (d *Driver) AvailableCapacity(nodeUsage 
types.Resources, req types.LabelDefinition) int64 { var outCount int64 @@ -125,11 +131,11 @@ func (d *Driver) AvailableCapacity(nodeUsage types.Resources, req types.LabelDef } // Check if the node has the required resources - otherwise we can't run it anyhow - availCpu, availRam := d.getAvailResources() - if req.Resources.Cpu > availCpu { + availCPU, availRAM := d.getAvailResources() + if req.Resources.Cpu > availCPU { return 0 } - if req.Resources.Ram > availRam { + if req.Resources.Ram > availRAM { return 0 } // TODO: Check disk requirements @@ -146,16 +152,16 @@ func (d *Driver) AvailableCapacity(nodeUsage types.Resources, req types.LabelDef if nodeUsage.Multitenancy && req.Resources.Multitenancy { // Ok we can run more tenants, let's calculate how much if nodeUsage.CpuOverbook && req.Resources.CpuOverbook { - availCpu += d.cfg.CpuOverbook + availCPU += d.cfg.CPUOverbook } if nodeUsage.RamOverbook && req.Resources.RamOverbook { - availRam += d.cfg.RamOverbook + availRAM += d.cfg.RAMOverbook } } // Calculate how much of those definitions we could run - outCount = int64((availCpu - nodeUsage.Cpu) / req.Resources.Cpu) - ramCount := int64((availRam - nodeUsage.Ram) / req.Resources.Ram) + outCount = int64((availCPU - nodeUsage.Cpu) / req.Resources.Cpu) + ramCount := int64((availRAM - nodeUsage.Ram) / req.Resources.Ram) if outCount > ramCount { outCount = ramCount } @@ -164,12 +170,10 @@ func (d *Driver) AvailableCapacity(nodeUsage types.Resources, req types.LabelDef return outCount } -/** - * Allocate workload environment with the provided images - * - * It automatically download the required images, unpack them and runs the workload. - * Using metadata to pass the env to the entry point of the image. - */ +// Allocate workload environment with the provided images +// +// It automatically download the required images, unpack them and runs the workload. +// Using metadata to pass the env to the entry point of the image. 
func (d *Driver) Allocate(def types.LabelDefinition, metadata map[string]any) (*types.Resource, error) { var opts Options if err := opts.Apply(def.Options); err != nil { @@ -214,7 +218,8 @@ func (d *Driver) Allocate(def types.LabelDefinition, metadata map[string]any) (* return &types.Resource{Identifier: user}, nil } -func (d *Driver) Status(res *types.Resource) (string, error) { +// Status shows status of the resource +func (*Driver) Status(res *types.Resource) (string, error) { if res == nil || res.Identifier == "" { return "", fmt.Errorf("Native: Invalid resource: %v", res) } @@ -224,6 +229,7 @@ func (d *Driver) Status(res *types.Resource) (string, error) { return drivers.StatusNone, nil } +// GetTask returns task struct by name func (d *Driver) GetTask(name, options string) drivers.ResourceDriverTask { // Look for the specified task name var t drivers.ResourceDriverTask @@ -244,6 +250,7 @@ func (d *Driver) GetTask(name, options string) drivers.ResourceDriverTask { return t } +// Deallocate the resource func (d *Driver) Deallocate(res *types.Resource) error { if res == nil || res.Identifier == "" { return fmt.Errorf("Native: Invalid resource: %v", res) diff --git a/lib/drivers/native/options.go b/lib/drivers/native/options.go index 71a0a74..7be30e4 100644 --- a/lib/drivers/native/options.go +++ b/lib/drivers/native/options.go @@ -24,20 +24,21 @@ import ( "github.com/adobe/aquarium-fish/lib/util" ) -/** - * Options example: - * images: - * - url: https://artifact-storage/aquarium/image/native/macos-VERSION/macos-VERSION.tar.xz - * sum: sha256:1234567890abcdef1234567890abcdef1 - * tag: ws # The same as a name of disk in Label resource definition - * - url: https://artifact-storage/aquarium/image/native/macos_amd64-ci-VERSION/macos_amd64-ci-VERSION.tar.xz - * sum: sha256:1234567890abcdef1234567890abcdef2 - * tag: ws - * entry: "{{ .Disks.ws }}/init.sh" # CWD is user home - * groups: - * - staff - * - importantgroup - */ +// Options for label definition +// +// 
Example: +// +// images: +// - url: https://artifact-storage/aquarium/image/native/macos-VERSION/macos-VERSION.tar.xz +// sum: sha256:1234567890abcdef1234567890abcdef1 +// tag: ws # The same as a name of disk in Label resource definition +// - url: https://artifact-storage/aquarium/image/native/macos_amd64-ci-VERSION/macos_amd64-ci-VERSION.tar.xz +// sum: sha256:1234567890abcdef1234567890abcdef2 +// tag: ws +// entry: "{{ .Disks.ws }}/init.sh" # CWD is user home +// groups: +// - staff +// - importantgroup type Options struct { Images []drivers.Image `json:"images"` // Optional list of image dependencies, they will be unpacked in order //TODO: Setup string `json:"setup"` // Optional path to the executable, it will be started before the Entry with escalated privileges @@ -45,7 +46,8 @@ type Options struct { Groups []string `json:"groups"` // Optional user groups user should have, first one is primary (default: staff) } -func (o *Options) Apply(options util.UnparsedJson) error { +// Apply takes json and applies it to the options structure +func (o *Options) Apply(options util.UnparsedJSON) error { if err := json.Unmarshal([]byte(options), o); err != nil { return log.Error("Native: Unable to apply the driver definition", err) } @@ -53,6 +55,7 @@ func (o *Options) Apply(options util.UnparsedJson) error { return o.Validate() } +// Validate makes sure the options have the required defaults & that the required fields are set // Note: there is no mandatory options, because in theory the native env could be pre-created func (o *Options) Validate() error { // Set default entry diff --git a/lib/drivers/native/util.go b/lib/drivers/native/util.go index dd1be19..4970612 100644 --- a/lib/drivers/native/util.go +++ b/lib/drivers/native/util.go @@ -41,17 +41,17 @@ import ( var userCreateLock sync.Mutex // Returns the total resources available for the node after alteration -func (d *Driver) getAvailResources() (availCpu, availRam uint) { - if d.cfg.CpuAlter < 0 { - availCpu = 
d.totalCpu - uint(-d.cfg.CpuAlter) +func (d *Driver) getAvailResources() (availCPU, availRAM uint) { + if d.cfg.CPUAlter < 0 { + availCPU = d.totalCPU - uint(-d.cfg.CPUAlter) } else { - availCpu = d.totalCpu + uint(d.cfg.CpuAlter) + availCPU = d.totalCPU + uint(d.cfg.CPUAlter) } - if d.cfg.RamAlter < 0 { - availRam = d.totalRam - uint(-d.cfg.RamAlter) + if d.cfg.RAMAlter < 0 { + availRAM = d.totalRAM - uint(-d.cfg.RAMAlter) } else { - availRam = d.totalRam + uint(d.cfg.RamAlter) + availRAM = d.totalRAM + uint(d.cfg.RAMAlter) } return @@ -61,14 +61,14 @@ func (d *Driver) getAvailResources() (availCpu, availRam uint) { func (d *Driver) loadImages(user string, images []drivers.Image, diskPaths map[string]string) error { var wg sync.WaitGroup for _, image := range images { - log.Info("Native: Loading the required image:", image.Name, image.Version, image.Url) + log.Info("Native: Loading the required image:", image.Name, image.Version, image.URL) // Running the background routine to download, unpack and process the image wg.Add(1) go func(image drivers.Image) { defer wg.Done() if err := image.DownloadUnpack(d.cfg.ImagesPath, d.cfg.DownloadUser, d.cfg.DownloadPassword); err != nil { - log.Error("Native: Unable to download and unpack the image:", image.Name, image.Url, err) + log.Error("Native: Unable to download and unpack the image:", image.Name, image.URL, err) } }(image) } @@ -118,9 +118,9 @@ func (d *Driver) loadImages(user string, images []drivers.Image, diskPaths map[s if err != nil { return log.Error("Native: Unable to read the image:", imageArchive, err) } - defer f.Close() log.Info("Native: Unpacking image:", user, imageArchive, unpackPath) _, _, err = runAndLog(5*time.Minute, f, d.cfg.SudoPath, "-n", d.cfg.TarPath, "-xf", "-", "--uname", user, "-C", unpackPath+"/") + f.Close() if err != nil { return log.Error("Native: Unable to unpack the image:", imageArchive, err) } @@ -169,22 +169,22 @@ func userCreate(c *Config, groups []string) (user, homedir string, err 
error) { } // Finding the max user id in the OS - userId := int64(1000) // Min 1000 is ok for most of the unix systems + userID := int64(1000) // Min 1000 is ok for most of the unix systems splitStdout := strings.Split(strings.TrimSpace(stdout), "\n") for _, line := range splitStdout { - lineId := line[strings.LastIndex(line, " ")+1:] - lineIdNum, err := strconv.ParseInt(lineId, 10, 64) + lineID := line[strings.LastIndex(line, " ")+1:] + lineIDNum, err := strconv.ParseInt(lineID, 10, 64) if err != nil { log.Warnf("Native: Unable to parse user id from line: %q", line) continue } - if lineIdNum > userId { - userId = lineIdNum + if lineIDNum > userID { + userID = lineIDNum } } // Increment max user id and use it as unique id for new user - if _, _, err = runAndLog(5*time.Second, nil, c.SudoPath, "-n", c.DsclPath, ".", "create", "/Users/"+user, "UniqueID", fmt.Sprint(userId+1)); err != nil { + if _, _, err = runAndLog(5*time.Second, nil, c.SudoPath, "-n", c.DsclPath, ".", "create", "/Users/"+user, "UniqueID", fmt.Sprint(userID+1)); err != nil { userCreateLock.Unlock() err = log.Error("Native: Unable to set user UniqueID:", err) return diff --git a/lib/drivers/task.go b/lib/drivers/task.go index fe1a259..3eb98d7 100644 --- a/lib/drivers/task.go +++ b/lib/drivers/task.go @@ -16,6 +16,7 @@ import ( "github.com/adobe/aquarium-fish/lib/openapi/types" ) +// ResourceDriverTask is interface for driver tasks execution type ResourceDriverTask interface { // Name of the task Name() string diff --git a/lib/drivers/test/config.go b/lib/drivers/test/config.go index 85d08cf..da05d36 100644 --- a/lib/drivers/test/config.go +++ b/lib/drivers/test/config.go @@ -10,6 +10,7 @@ * governing permissions and limitations under the License. 
*/ +// Package test implements mock driver package test import ( @@ -20,16 +21,17 @@ import ( "github.com/adobe/aquarium-fish/lib/log" ) +// Config - node driver configuration type Config struct { IsRemote bool `json:"is_remote"` // Pretend to be remote or not to check the local node limits WorkspacePath string `json:"workspace_path"` // Where to place the files of running resources - CpuLimit uint `json:"cpu_limit"` // Number of available virtual CPUs, 0 - unlimited - RamLimit uint `json:"ram_limit"` // Amount of available virtual RAM (GB), 0 - unlimited + CPULimit uint `json:"cpu_limit"` // Number of available virtual CPUs, 0 - unlimited + RAMLimit uint `json:"ram_limit"` // Amount of available virtual RAM (GB), 0 - unlimited - CpuOverbook uint `json:"cpu_overbook"` // How many CPUs available for overbook - RamOverbook uint `json:"ram_overbook"` // How much RAM (GB) available for overbook + CPUOverbook uint `json:"cpu_overbook"` // How many CPUs available for overbook + RAMOverbook uint `json:"ram_overbook"` // How much RAM (GB) available for overbook FailConfigApply uint8 `json:"fail_config_apply"` // Fail on config Apply (0 - not, 1-254 random, 255-yes) FailConfigValidate uint8 `json:"fail_config_validate"` // Fail on config Validation (0 - not, 1-254 random, 255-yes) @@ -38,6 +40,7 @@ type Config struct { FailDeallocate uint8 `json:"fail_deallocate"` // Fail on Deallocate (0 - not, 1-254 random, 255-yes) } +// Apply takes json and applies it to the config structure func (c *Config) Apply(config []byte) error { // Parse json if len(config) > 0 { @@ -49,6 +52,7 @@ func (c *Config) Apply(config []byte) error { return randomFail("ConfigApply", c.FailConfigApply) } +// Validate makes sure the config have the required defaults & that the required fields are set func (c *Config) Validate() (err error) { if c.WorkspacePath == "" { c.WorkspacePath = "fish_test_workspace" diff --git a/lib/drivers/test/driver.go b/lib/drivers/test/driver.go index 38ab2c3..11d7ce9 100644 
--- a/lib/drivers/test/driver.go +++ b/lib/drivers/test/driver.go @@ -27,14 +27,16 @@ import ( "github.com/adobe/aquarium-fish/lib/openapi/types" ) -// Implements drivers.ResourceDriverFactory interface +// Factory implements drivers.ResourceDriverFactory interface type Factory struct{} -func (f *Factory) Name() string { +// Name shows name of the driver factory +func (*Factory) Name() string { return "test" } -func (f *Factory) NewResourceDriver() drivers.ResourceDriver { +// NewResourceDriver creates new resource driver +func (*Factory) NewResourceDriver() drivers.ResourceDriver { return &Driver{} } @@ -42,21 +44,24 @@ func init() { drivers.FactoryList = append(drivers.FactoryList, &Factory{}) } -// Implements drivers.ResourceDriver interface +// Driver implements drivers.ResourceDriver interface type Driver struct { cfg Config // Contains the available tasks of the driver tasksList []drivers.ResourceDriverTask } -func (d *Driver) Name() string { +// Name returns name of the driver +func (*Driver) Name() string { return "test" } +// IsRemote needed to detect the out-of-node resources managed by this driver func (d *Driver) IsRemote() bool { return d.cfg.IsRemote } +// Prepare initializes the driver func (d *Driver) Prepare(config []byte) error { if err := d.cfg.Apply(config); err != nil { return err @@ -71,12 +76,13 @@ func (d *Driver) Prepare(config []byte) error { return nil } -func (d *Driver) ValidateDefinition(def types.LabelDefinition) error { +// ValidateDefinition checks LabelDefinition is ok +func (*Driver) ValidateDefinition(def types.LabelDefinition) error { var opts Options return opts.Apply(def.Options) } -// Allow Fish to ask the driver about it's capacity (free slots) of a specific definition +// AvailableCapacity allows Fish to ask the driver about it's capacity (free slots) of a specific definition func (d *Driver) AvailableCapacity(nodeUsage types.Resources, req types.LabelDefinition) int64 { var outCount int64 @@ -91,19 +97,19 @@ func (d 
*Driver) AvailableCapacity(nodeUsage types.Resources, req types.LabelDef return -1 } - totalCpu := d.cfg.CpuLimit - totalRam := d.cfg.RamLimit + totalCPU := d.cfg.CPULimit + totalRAM := d.cfg.RAMLimit - if totalCpu == 0 && totalRam == 0 { + if totalCPU == 0 && totalRAM == 0 { // Resources are unlimited return 99999 } // Check if the node has the required resources - otherwise we can't run it anyhow - if req.Resources.Cpu > totalCpu { + if req.Resources.Cpu > totalCPU { return 0 } - if req.Resources.Ram > totalRam { + if req.Resources.Ram > totalRAM { return 0 } // TODO: Check disk requirements @@ -120,16 +126,16 @@ func (d *Driver) AvailableCapacity(nodeUsage types.Resources, req types.LabelDef if nodeUsage.Multitenancy && req.Resources.Multitenancy { // Ok we can run more tenants, let's calculate how much if nodeUsage.CpuOverbook && req.Resources.CpuOverbook { - totalCpu += d.cfg.CpuOverbook + totalCPU += d.cfg.CPUOverbook } if nodeUsage.RamOverbook && req.Resources.RamOverbook { - totalRam += d.cfg.RamOverbook + totalRAM += d.cfg.RAMOverbook } } // Calculate how much of those definitions we could run - outCount = int64((totalCpu - nodeUsage.Cpu) / req.Resources.Cpu) - ramCount := int64((totalRam - nodeUsage.Ram) / req.Resources.Ram) + outCount = int64((totalCPU - nodeUsage.Cpu) / req.Resources.Cpu) + ramCount := int64((totalRAM - nodeUsage.Ram) / req.Resources.Ram) if outCount > ramCount { outCount = ramCount } @@ -138,10 +144,8 @@ func (d *Driver) AvailableCapacity(nodeUsage types.Resources, req types.LabelDef return outCount } -/** - * Pretend to Allocate (actually not) the Resource - */ -func (d *Driver) Allocate(def types.LabelDefinition, metadata map[string]any) (*types.Resource, error) { +// Allocate - pretends to Allocate (actually not) the Resource +func (d *Driver) Allocate(def types.LabelDefinition, _ /*metadata*/ map[string]any) (*types.Resource, error) { var opts Options if err := opts.Apply(def.Options); err != nil { return nil, log.Error("TEST: 
Unable to apply options:", err) @@ -172,12 +176,13 @@ func (d *Driver) Allocate(def types.LabelDefinition, metadata map[string]any) (* return res, nil } +// Status shows status of the resource func (d *Driver) Status(res *types.Resource) (string, error) { if res == nil || res.Identifier == "" { return "", fmt.Errorf("TEST: Invalid resource: %v", res) } if err := randomFail(fmt.Sprintf("Status %s", res.Identifier), d.cfg.FailStatus); err != nil { - return "", fmt.Errorf("TEST: RandomFail: %v\n", err) + return "", fmt.Errorf("TEST: RandomFail: %v", err) } resFile := filepath.Join(d.cfg.WorkspacePath, res.Identifier) @@ -187,6 +192,7 @@ func (d *Driver) Status(res *types.Resource) (string, error) { return drivers.StatusNone, nil } +// GetTask returns task struct by name func (d *Driver) GetTask(name, options string) drivers.ResourceDriverTask { // Look for the specified task name var t drivers.ResourceDriverTask @@ -207,6 +213,7 @@ func (d *Driver) GetTask(name, options string) drivers.ResourceDriverTask { return t } +// Deallocate the resource func (d *Driver) Deallocate(res *types.Resource) error { if res == nil || res.Identifier == "" { return log.Error("TEST: Invalid resource:", res) diff --git a/lib/drivers/test/options.go b/lib/drivers/test/options.go index e417a66..50296ec 100644 --- a/lib/drivers/test/options.go +++ b/lib/drivers/test/options.go @@ -19,6 +19,7 @@ import ( "github.com/adobe/aquarium-fish/lib/util" ) +// Options for testing type Options struct { FailOptionsApply uint8 `json:"fail_options_apply"` // Fail on options Apply (0 - not, 1-254 random, 255-yes) FailOptionsValidate uint8 `json:"fail_options_validate"` // Fail on options Validate (0 - not, 1-254 random, 255-yes) @@ -26,7 +27,8 @@ type Options struct { FailAllocate uint8 `json:"fail_allocate"` // Fail on Allocate (0 - not, 1-254 random, 255-yes) } -func (o *Options) Apply(options util.UnparsedJson) error { +// Apply takes json and applies it to the options structure +func (o *Options) 
Apply(options util.UnparsedJSON) error { if err := json.Unmarshal([]byte(options), o); err != nil { return log.Error("TEST: Unable to apply the driver options:", err) } @@ -38,6 +40,7 @@ func (o *Options) Apply(options util.UnparsedJson) error { return randomFail("OptionsApply", o.FailOptionsApply) } +// Validate makes sure the options have the required defaults & that the required fields are set func (o *Options) Validate() error { return randomFail("OptionsValidate", o.FailOptionsValidate) } diff --git a/lib/drivers/test/tasks.go b/lib/drivers/test/tasks_snapshot.go similarity index 91% rename from lib/drivers/test/tasks.go rename to lib/drivers/test/tasks_snapshot.go index 290ef3b..043a4b8 100644 --- a/lib/drivers/test/tasks.go +++ b/lib/drivers/test/tasks_snapshot.go @@ -23,8 +23,9 @@ import ( "github.com/adobe/aquarium-fish/lib/openapi/types" ) +// TaskSnapshot implements test snapshot task type TaskSnapshot struct { - driver *Driver `json:"-"` + driver *Driver *types.ApplicationTask `json:"-"` // Info about the requested task *types.LabelDefinition `json:"-"` // Info about the used label definition @@ -33,21 +34,25 @@ type TaskSnapshot struct { Full bool `json:"full"` // Make full (all disks including OS image), or just the additional disks snapshot } -func (t *TaskSnapshot) Name() string { +// Name shows name of the task +func (*TaskSnapshot) Name() string { return "snapshot" } +// Clone copies task to use it func (t *TaskSnapshot) Clone() drivers.ResourceDriverTask { n := *t return &n } +// SetInfo defines the task environment func (t *TaskSnapshot) SetInfo(task *types.ApplicationTask, def *types.LabelDefinition, res *types.Resource) { t.ApplicationTask = task t.LabelDefinition = def t.Resource = res } +// Execute runs the task func (t *TaskSnapshot) Execute() (result []byte, err error) { if t.ApplicationTask == nil { return []byte(`{"error":"internal: invalid application task"}`), log.Error("TEST: Invalid application task:", t.ApplicationTask) diff --git 
a/lib/drivers/vmx/config.go b/lib/drivers/vmx/config.go index d6be978..b7c9e5f 100644 --- a/lib/drivers/vmx/config.go +++ b/lib/drivers/vmx/config.go @@ -10,6 +10,7 @@ * governing permissions and limitations under the License. */ +// Package vmx implements driver package vmx import ( @@ -24,6 +25,7 @@ import ( "github.com/adobe/aquarium-fish/lib/log" ) +// Config - node driver configuration type Config struct { VmrunPath string `json:"vmrun_path"` // '/Applications/VMware Fusion.app/Contents/Library/vmrun' VdiskmanagerPath string `json:"vdiskmanager_path"` // '/Applications/VMware Fusion.app/Contents/Library/vmware-vdiskmanager' @@ -37,17 +39,17 @@ type Config struct { // for disk caching) // * Positive (>0) value could also be available (but check it in your vmware dist in advance) // Please be careful here - noone wants the VM to fail allocation because of that... - CpuAlter int `json:"cpu_alter"` // 0 do nothing, <0 reduces number available CPUs, >0 increases it (dangerous) - RamAlter int `json:"ram_alter"` // 0 do nothing, <0 reduces amount of available RAM (GB), >0 increases it (dangerous) + CPUAlter int `json:"cpu_alter"` // 0 do nothing, <0 reduces number available CPUs, >0 increases it (dangerous) + RAMAlter int `json:"ram_alter"` // 0 do nothing, <0 reduces amount of available RAM (GB), >0 increases it (dangerous) // Overbook options allows tenants to reuse the resources // It will be used only when overbook is allowed by the tenants. It works by just adding those // amounts to the existing total before checking availability. For example if you have 16CPU - // and want to run 2 tenants with requirement of 14 CPUs each - you can put 12 in CpuOverbook - + // and want to run 2 tenants with requirement of 14 CPUs each - you can put 12 in CPUOverbook - // to have virtually 28 CPUs. 3rd will not be running because 2 tenants will eat all 28 virtual // CPUs. Same applies to the RamOverbook. 
- CpuOverbook uint `json:"cpu_overbook"` // How much CPUs could be reused by multiple tenants - RamOverbook uint `json:"ram_overbook"` // How much RAM (GB) could be reused by multiple tenants + CPUOverbook uint `json:"cpu_overbook"` // How much CPUs could be reused by multiple tenants + RAMOverbook uint `json:"ram_overbook"` // How much RAM (GB) could be reused by multiple tenants DownloadUser string `json:"download_user"` // The user will be used in download operations DownloadPassword string `json:"download_password"` // The password will be used in download operations @@ -55,6 +57,7 @@ type Config struct { LogMonitor bool `json:"log_monitor"` // Actively monitor the vmware.log of VM and reset it on halt } +// Apply takes json and applies it to the config structure func (c *Config) Apply(config []byte) error { // Set defaults c.LogMonitor = true @@ -68,6 +71,7 @@ func (c *Config) Apply(config []byte) error { return nil } +// Validate makes sure the config have the required defaults & that the required fields are set func (c *Config) Validate() (err error) { // Check that values of the config is filled at least with defaults if c.VmrunPath == "" { @@ -112,8 +116,8 @@ func (c *Config) Validate() (err error) { return err } - if c.CpuAlter < 0 && cpuStat <= -c.CpuAlter { - return log.Errorf("VMX: |CpuAlter| can't be more or equal the available Host CPUs: |%d| > %d", c.CpuAlter, cpuStat) + if c.CPUAlter < 0 && cpuStat <= -c.CPUAlter { + return log.Errorf("VMX: |CpuAlter| can't be more or equal the available Host CPUs: |%d| > %d", c.CPUAlter, cpuStat) } memStat, err := mem.VirtualMemory() @@ -122,8 +126,8 @@ func (c *Config) Validate() (err error) { } ramStat := memStat.Total / 1073741824 // Getting GB from Bytes - if c.RamAlter < 0 && int(ramStat) <= -c.RamAlter { - return log.Errorf("VMX: |RamAlter| can't be more or equal the available Host RAM: |%d| > %d", c.RamAlter, ramStat) + if c.RAMAlter < 0 && int(ramStat) <= -c.RAMAlter { + return log.Errorf("VMX: |RamAlter| 
can't be more or equal the available Host RAM: |%d| > %d", c.RAMAlter, ramStat) } return nil diff --git a/lib/drivers/vmx/driver.go b/lib/drivers/vmx/driver.go index 1ca74ec..9686242 100644 --- a/lib/drivers/vmx/driver.go +++ b/lib/drivers/vmx/driver.go @@ -30,14 +30,16 @@ import ( "github.com/adobe/aquarium-fish/lib/util" ) -// Implements drivers.ResourceDriverFactory interface +// Factory implements drivers.ResourceDriverFactory interface type Factory struct{} -func (f *Factory) Name() string { +// Name shows name of the driver factory +func (*Factory) Name() string { return "vmx" } -func (f *Factory) NewResourceDriver() drivers.ResourceDriver { +// NewResourceDriver creates new resource driver +func (*Factory) NewResourceDriver() drivers.ResourceDriver { return &Driver{} } @@ -45,24 +47,27 @@ func init() { drivers.FactoryList = append(drivers.FactoryList, &Factory{}) } -// Implements drivers.ResourceDriver interface +// Driver implements drivers.ResourceDriver interface type Driver struct { cfg Config // Contains the available tasks of the driver tasksList []drivers.ResourceDriverTask - totalCpu uint // In logical threads - totalRam uint // In RAM GB + totalCPU uint // In logical threads + totalRAM uint // In RAM GB } -func (d *Driver) Name() string { +// Name returns name of the driver +func (*Driver) Name() string { return "vmx" } -func (d *Driver) IsRemote() bool { +// IsRemote needed to detect the out-of-node resources managed by this driver +func (*Driver) IsRemote() bool { return false } +// Prepare initializes the driver func (d *Driver) Prepare(config []byte) error { if err := d.cfg.Apply(config); err != nil { return err @@ -76,20 +81,21 @@ func (d *Driver) Prepare(config []byte) error { if err != nil { return err } - d.totalCpu = uint(cpuStat) + d.totalCPU = uint(cpuStat) memStat, err := mem.VirtualMemory() if err != nil { return err } - d.totalRam = uint(memStat.Total / 1073741824) // Getting GB from Bytes + d.totalRAM = uint(memStat.Total / 
1073741824) // Getting GB from Bytes // TODO: Cleanup the image directory in case the images are not good return nil } -func (d *Driver) ValidateDefinition(def types.LabelDefinition) error { +// ValidateDefinition checks LabelDefinition is ok +func (*Driver) ValidateDefinition(def types.LabelDefinition) error { // Check resources if err := def.Resources.Validate([]string{"hfs+", "exfat", "fat32"}, true); err != nil { return log.Error("VMX: Resources validation failed:", err) @@ -100,17 +106,17 @@ func (d *Driver) ValidateDefinition(def types.LabelDefinition) error { return opts.Apply(def.Options) } -// Allow Fish to ask the driver about it's capacity (free slots) of a specific definition +// AvailableCapacity allows Fish to ask the driver about it's capacity (free slots) of a specific definition func (d *Driver) AvailableCapacity(nodeUsage types.Resources, req types.LabelDefinition) int64 { var outCount int64 - availCpu, availRam := d.getAvailResources() + availCPU, availRAM := d.getAvailResources() // Check if the node has the required resources - otherwise we can't run it anyhow - if req.Resources.Cpu > availCpu { + if req.Resources.Cpu > availCPU { return 0 } - if req.Resources.Ram > availRam { + if req.Resources.Ram > availRAM { return 0 } // TODO: Check disk requirements @@ -127,16 +133,16 @@ func (d *Driver) AvailableCapacity(nodeUsage types.Resources, req types.LabelDef if nodeUsage.Multitenancy && req.Resources.Multitenancy { // Ok we can run more tenants, let's calculate how much if nodeUsage.CpuOverbook && req.Resources.CpuOverbook { - availCpu += d.cfg.CpuOverbook + availCPU += d.cfg.CPUOverbook } if nodeUsage.RamOverbook && req.Resources.RamOverbook { - availRam += d.cfg.RamOverbook + availRAM += d.cfg.RAMOverbook } } // Calculate how much of those definitions we could run - outCount = int64((availCpu - nodeUsage.Cpu) / req.Resources.Cpu) - ramCount := int64((availRam - nodeUsage.Ram) / req.Resources.Ram) + outCount = int64((availCPU - nodeUsage.Cpu) / 
req.Resources.Cpu) + ramCount := int64((availRAM - nodeUsage.Ram) / req.Resources.Ram) if outCount > ramCount { outCount = ramCount } @@ -145,13 +151,11 @@ func (d *Driver) AvailableCapacity(nodeUsage types.Resources, req types.LabelDef return outCount } -/** - * Allocate VM with provided images - * - * It automatically download the required images, unpack them and runs the VM. - * Not using metadata because there is no good interfaces to pass it to VM. - */ -func (d *Driver) Allocate(def types.LabelDefinition, metadata map[string]any) (*types.Resource, error) { +// Allocate VM with provided images +// +// It automatically download the required images, unpack them and runs the VM. +// Not using metadata because there is no good interfaces to pass it to VM. +func (d *Driver) Allocate(def types.LabelDefinition, _ /*metadata*/ map[string]any) (*types.Resource, error) { var opts Options if err := opts.Apply(def.Options); err != nil { return nil, log.Error("VMX: Unable to apply options:", err) @@ -160,7 +164,7 @@ func (d *Driver) Allocate(def types.LabelDefinition, metadata map[string]any) (* // Generate unique id from the hw address and required directories buf := crypt.RandBytes(6) buf[0] = (buf[0] | 2) & 0xfe // Set local bit, ensure unicast address - vmId := fmt.Sprintf("%02x%02x%02x%02x%02x%02x", buf[0], buf[1], buf[2], buf[3], buf[4], buf[5]) + vmID := fmt.Sprintf("%02x%02x%02x%02x%02x%02x", buf[0], buf[1], buf[2], buf[3], buf[4], buf[5]) vmHwaddr := fmt.Sprintf("%02x:%02x:%02x:%02x:%02x:%02x", buf[0], buf[1], buf[2], buf[3], buf[4], buf[5]) vmNetwork := def.Resources.Network @@ -168,25 +172,25 @@ func (d *Driver) Allocate(def types.LabelDefinition, metadata map[string]any) (* vmNetwork = "hostonly" } - vmDir := filepath.Join(d.cfg.WorkspacePath, vmId) + vmDir := filepath.Join(d.cfg.WorkspacePath, vmID) vmImagesDir := filepath.Join(vmDir, "images") // Load the required images imgPath, err := d.loadImages(&opts, vmImagesDir) if err != nil { - d.cleanupVm(vmDir) + 
d.cleanupVM(vmDir) return nil, log.Error("VMX: Unable to load the required images:", err) } // Clone VM from the image - vmxPath := filepath.Join(vmDir, vmId+".vmx") + vmxPath := filepath.Join(vmDir, vmID+".vmx") args := []string{"-T", "fusion", "clone", imgPath, vmxPath, "linked", "-snapshot", "original", - "-cloneName", vmId, + "-cloneName", vmID, } if _, _, err := runAndLog(120*time.Second, d.cfg.VmrunPath, args...); err != nil { - d.cleanupVm(vmDir) + d.cleanupVM(vmDir) return nil, log.Error("VMX: Unable to clone the target image:", imgPath, err) } @@ -200,25 +204,25 @@ func (d *Driver) Allocate(def types.LabelDefinition, metadata map[string]any) (* "cpuid.corespersocket =", fmt.Sprintf(`cpuid.corespersocket = "%d"`, def.Resources.Cpu), "memsize =", fmt.Sprintf(`memsize = "%d"`, def.Resources.Ram*1024), ); err != nil { - d.cleanupVm(vmDir) + d.cleanupVM(vmDir) return nil, log.Error("VMX: Unable to change cloned VM configuration:", vmxPath, err) } // Create and connect disks to vmx if err := d.disksCreate(vmxPath, def.Resources.Disks); err != nil { - d.cleanupVm(vmDir) + d.cleanupVM(vmDir) return nil, log.Error("VMX: Unable create disks for VM:", vmxPath, err) } // Run the background monitoring of the vmware log if d.cfg.LogMonitor { - go d.logMonitor(vmId, vmxPath) + go d.logMonitor(vmID, vmxPath) } // Run the VM if _, _, err := runAndLog(120*time.Second, d.cfg.VmrunPath, "start", vmxPath, "nogui"); err != nil { log.Error("VMX: Check logs in ~/Library/Logs/VMware/ or enable debug to see vmware.log") - d.cleanupVm(vmDir) + d.cleanupVM(vmDir) return nil, log.Error("VMX: Unable to run VM:", vmxPath, err) } @@ -230,6 +234,7 @@ func (d *Driver) Allocate(def types.LabelDefinition, metadata map[string]any) (* }, nil } +// Status shows status of the resource func (d *Driver) Status(res *types.Resource) (string, error) { if res == nil || res.Identifier == "" { return "", fmt.Errorf("VMX: Invalid resource: %v", res) @@ -240,6 +245,7 @@ func (d *Driver) Status(res 
*types.Resource) (string, error) { return drivers.StatusNone, nil } +// GetTask returns task struct by name func (d *Driver) GetTask(name, options string) drivers.ResourceDriverTask { // Look for the specified task name var t drivers.ResourceDriverTask @@ -260,6 +266,7 @@ func (d *Driver) GetTask(name, options string) drivers.ResourceDriverTask { return t } +// Deallocate the resource func (d *Driver) Deallocate(res *types.Resource) error { if res == nil || res.Identifier == "" { return fmt.Errorf("VMX: Invalid resource: %v", res) @@ -284,7 +291,7 @@ func (d *Driver) Deallocate(res *types.Resource) error { } // Cleaning the VM images too - d.cleanupVm(filepath.Dir(vmxPath)) + d.cleanupVM(filepath.Dir(vmxPath)) log.Info("VMX: Deallocate of VM completed:", vmxPath) diff --git a/lib/drivers/vmx/options.go b/lib/drivers/vmx/options.go index af7bf33..ed50252 100644 --- a/lib/drivers/vmx/options.go +++ b/lib/drivers/vmx/options.go @@ -20,21 +20,23 @@ import ( "github.com/adobe/aquarium-fish/lib/util" ) -/** - * Options example: - * images: - * - url: https://artifact-storage/aquarium/image/vmx/macos1015-VERSION/macos1015-VERSION.tar.xz - * sum: sha256:1234567890abcdef1234567890abcdef1 - * - url: https://artifact-storage/aquarium/image/vmx/macos1015-xcode122-VERSION/macos1015-xcode122-VERSION.tar.xz - * sum: sha256:1234567890abcdef1234567890abcdef2 - * - url: https://artifact-storage/aquarium/image/vmx/macos1015-xcode122-ci-VERSION/macos1015-xcode122-ci-VERSION.tar.xz - * sum: sha256:1234567890abcdef1234567890abcdef3 - */ +// Options for label definition +// +// Example: +// +// images: +// - url: https://artifact-storage/aquarium/image/vmx/macos1015-VERSION/macos1015-VERSION.tar.xz +// sum: sha256:1234567890abcdef1234567890abcdef1 +// - url: https://artifact-storage/aquarium/image/vmx/macos1015-xcode122-VERSION/macos1015-xcode122-VERSION.tar.xz +// sum: sha256:1234567890abcdef1234567890abcdef2 +// - url: 
https://artifact-storage/aquarium/image/vmx/macos1015-xcode122-ci-VERSION/macos1015-xcode122-ci-VERSION.tar.xz +// sum: sha256:1234567890abcdef1234567890abcdef3 type Options struct { Images []drivers.Image `json:"images"` // List of image dependencies, last one is running one } -func (o *Options) Apply(options util.UnparsedJson) error { +// Apply takes json and applies it to the options structure +func (o *Options) Apply(options util.UnparsedJSON) error { if err := json.Unmarshal([]byte(options), o); err != nil { return log.Error("VMX: Unable to apply the driver options", err) } @@ -42,6 +44,7 @@ func (o *Options) Apply(options util.UnparsedJson) error { return o.Validate() } +// Validate makes sure the options have the required defaults & that the required fields are set func (o *Options) Validate() error { // Check images var imgErr error diff --git a/lib/drivers/vmx/util.go b/lib/drivers/vmx/util.go index 0967508..db94c02 100644 --- a/lib/drivers/vmx/util.go +++ b/lib/drivers/vmx/util.go @@ -33,17 +33,17 @@ import ( ) // Returns the total resources available for the node after alteration -func (d *Driver) getAvailResources() (availCpu, availRam uint) { - if d.cfg.CpuAlter < 0 { - availCpu = d.totalCpu - uint(-d.cfg.CpuAlter) +func (d *Driver) getAvailResources() (availCPU, availRAM uint) { + if d.cfg.CPUAlter < 0 { + availCPU = d.totalCPU - uint(-d.cfg.CPUAlter) } else { - availCpu = d.totalCpu + uint(d.cfg.CpuAlter) + availCPU = d.totalCPU + uint(d.cfg.CPUAlter) } - if d.cfg.RamAlter < 0 { - availRam = d.totalRam - uint(-d.cfg.RamAlter) + if d.cfg.RAMAlter < 0 { + availRAM = d.totalRAM - uint(-d.cfg.RAMAlter) } else { - availRam = d.totalRam + uint(d.cfg.RamAlter) + availRAM = d.totalRAM + uint(d.cfg.RAMAlter) } return @@ -58,7 +58,7 @@ func (d *Driver) loadImages(opts *Options, vmImagesDir string) (string, error) { targetPath := "" var wg sync.WaitGroup for imageIndex, image := range opts.Images { - log.Info("VMX: Loading the required image:", image.Name, 
image.Version, image.Url) + log.Info("VMX: Loading the required image:", image.Name, image.Version, image.URL) // Running the background routine to download, unpack and process the image // Success will be checked later by existence of the copied image in the vm directory @@ -66,7 +66,7 @@ func (d *Driver) loadImages(opts *Options, vmImagesDir string) (string, error) { go func(image drivers.Image, index int) error { defer wg.Done() if err := image.DownloadUnpack(d.cfg.ImagesPath, d.cfg.DownloadUser, d.cfg.DownloadPassword); err != nil { - return log.Error("VMX: Unable to download and unpack the image:", image.Name, image.Url, err) + return log.Error("VMX: Unable to download and unpack the image:", image.Name, image.URL, err) } // Getting the image subdir name in the unpacked dir @@ -336,26 +336,26 @@ func (d *Driver) disksCreate(vmxPath string, disks map[string]types.ResourcesDis } // Ensures the VM is not stale by monitoring the log -func (d *Driver) logMonitor(vmId, vmxPath string) { +func (d *Driver) logMonitor(vmID, vmxPath string) { // Monitor the vmware.log file logPath := filepath.Join(filepath.Dir(vmxPath), "vmware.log") t, _ := tail.TailFile(logPath, tail.Config{Follow: true, Poll: true}) - log.Debug("VMX: Start monitoring of log:", vmId, logPath) + log.Debug("VMX: Start monitoring of log:", vmID, logPath) for line := range t.Lines { - log.Debug("VMX:", vmId, "vmware.log:", line) + log.Debug("VMX:", vmID, "vmware.log:", line) // Send reset if the VM is switched to 0 status if strings.Contains(line.Text, "Tools: Changing running status: 1 => 0") { - log.Warn("VMX: Resetting the stale VM", vmId, vmxPath) + log.Warn("VMX: Resetting the stale VM", vmID, vmxPath) // We should not spend much time here, because we can miss // the file delete so running in a separated thread go runAndLog(10*time.Second, d.cfg.VmrunPath, "reset", vmxPath) } } - log.Debug("VMX: Done monitoring of log:", vmId, logPath) + log.Debug("VMX: Done monitoring of log:", vmID, logPath) } // 
Removes the entire directory for clean up purposes -func (d *Driver) cleanupVm(vmDir string) error { +func (*Driver) cleanupVM(vmDir string) error { if err := os.RemoveAll(vmDir); err != nil { log.Warn("VMX: Unable to clean up the vm directory:", vmDir, err) return err diff --git a/lib/fish/application.go b/lib/fish/application.go index e2385cc..2b0edf5 100644 --- a/lib/fish/application.go +++ b/lib/fish/application.go @@ -10,6 +10,7 @@ * governing permissions and limitations under the License. */ +// Package fish core defines all the internals of the Fish processes package fish import ( @@ -22,10 +23,11 @@ import ( "github.com/adobe/aquarium-fish/lib/util" ) +// ApplicationFind lists Applications by filter func (f *Fish) ApplicationFind(filter *string) (as []types.Application, err error) { db := f.db if filter != nil { - securedFilter, err := util.ExpressionSqlFilter(*filter) + securedFilter, err := util.ExpressionSQLFilter(*filter) if err != nil { log.Warn("Fish: SECURITY: weird SQL filter received:", err) // We do not fail here because we should not give attacker more information @@ -37,6 +39,7 @@ func (f *Fish) ApplicationFind(filter *string) (as []types.Application, err erro return as, err } +// ApplicationCreate makes new Application func (f *Fish) ApplicationCreate(a *types.Application) error { if a.LabelUID == uuid.Nil { return fmt.Errorf("Fish: LabelUID can't be unset") @@ -61,12 +64,14 @@ func (f *Fish) ApplicationCreate(a *types.Application) error { return f.db.Save(app).Error }*/ +// ApplicationGet returns Application by UID func (f *Fish) ApplicationGet(uid types.ApplicationUID) (a *types.Application, err error) { a = &types.Application{} err = f.db.First(a, uid).Error return a, err } +// ApplicationListGetStatusNew returns new Applications func (f *Fish) ApplicationListGetStatusNew() (as []types.Application, err error) { // SELECT * FROM applications WHERE UID in ( // SELECT application_uid FROM ( @@ -81,8 +86,9 @@ func (f *Fish) 
ApplicationListGetStatusNew() (as []types.Application, err error) return as, err } -func (f *Fish) ApplicationIsAllocated(appUid types.ApplicationUID) (err error) { - state, err := f.ApplicationStateGetByApplication(appUid) +// ApplicationIsAllocated returns if specific Application is allocated +func (f *Fish) ApplicationIsAllocated(appUID types.ApplicationUID) (err error) { + state, err := f.ApplicationStateGetByApplication(appUID) if err != nil { return err } else if state.Status != types.ApplicationStatusALLOCATED { diff --git a/lib/fish/application_state.go b/lib/fish/application_state.go index 9e785bc..451f69c 100644 --- a/lib/fish/application_state.go +++ b/lib/fish/application_state.go @@ -20,11 +20,13 @@ import ( "github.com/adobe/aquarium-fish/lib/openapi/types" ) +// ApplicationStateList returns list of ApplicationStates func (f *Fish) ApplicationStateList() (ass []types.ApplicationState, err error) { err = f.db.Find(&ass).Error return ass, err } +// ApplicationStateCreate makes new ApplicationState func (f *Fish) ApplicationStateCreate(as *types.ApplicationState) error { if as.ApplicationUID == uuid.Nil { return fmt.Errorf("Fish: ApplicationUID can't be unset") @@ -42,20 +44,22 @@ func (f *Fish) ApplicationStateCreate(as *types.ApplicationState) error { return f.db.Save(as).Error }*/ +// ApplicationStateGet returns specific ApplicationState func (f *Fish) ApplicationStateGet(uid types.ApplicationStateUID) (as *types.ApplicationState, err error) { as = &types.ApplicationState{} err = f.db.First(as, uid).Error return as, err } -func (f *Fish) ApplicationStateGetByApplication(appUid types.ApplicationUID) (as *types.ApplicationState, err error) { +// ApplicationStateGetByApplication returns ApplicationState by ApplicationUID +func (f *Fish) ApplicationStateGetByApplication(appUID types.ApplicationUID) (as *types.ApplicationState, err error) { as = &types.ApplicationState{} - err = f.db.Where("application_uid = ?", appUid).Order("created_at 
desc").First(as).Error + err = f.db.Where("application_uid = ?", appUID).Order("created_at desc").First(as).Error return as, err } -// Return false if Status in ERROR, DEALLOCATE or DEALLOCATED state -func (f *Fish) ApplicationStateIsActive(status types.ApplicationStatus) bool { +// ApplicationStateIsActive returns false if Status in ERROR, DEALLOCATE or DEALLOCATED state +func (*Fish) ApplicationStateIsActive(status types.ApplicationStatus) bool { if status == types.ApplicationStatusERROR { return false } diff --git a/lib/fish/application_task.go b/lib/fish/application_task.go index 5868c7f..a72a567 100644 --- a/lib/fish/application_task.go +++ b/lib/fish/application_task.go @@ -22,10 +22,11 @@ import ( "github.com/adobe/aquarium-fish/lib/util" ) +// ApplicationTaskFindByApplication allows to find all the ApplicationTasks by ApplicationUID func (f *Fish) ApplicationTaskFindByApplication(uid types.ApplicationUID, filter *string) (at []types.ApplicationTask, err error) { db := f.db.Where("application_uid = ?", uid) if filter != nil { - securedFilter, err := util.ExpressionSqlFilter(*filter) + securedFilter, err := util.ExpressionSQLFilter(*filter) if err != nil { log.Warn("Fish: SECURITY: weird SQL filter received:", err) // We do not fail here because we should not give attacker more information @@ -38,6 +39,7 @@ func (f *Fish) ApplicationTaskFindByApplication(uid types.ApplicationUID, filter return at, err } +// ApplicationTaskCreate makes a new ApplicationTask func (f *Fish) ApplicationTaskCreate(at *types.ApplicationTask) error { if at.ApplicationUID == uuid.Nil { return fmt.Errorf("Fish: ApplicationUID can't be unset") @@ -46,27 +48,30 @@ func (f *Fish) ApplicationTaskCreate(at *types.ApplicationTask) error { if at.Task == "" { return fmt.Errorf("Fish: Task can't be empty") } if at.Options == "" { - at.Options = util.UnparsedJson("{}") + at.Options = util.UnparsedJSON("{}") } if at.Result == "" { - at.Result = util.UnparsedJson("{}") + at.Result = util.UnparsedJSON("{}") } at.UID 
= f.NewUID() return f.db.Create(at).Error } +// ApplicationTaskSave stores the ApplicationTask func (f *Fish) ApplicationTaskSave(at *types.ApplicationTask) error { return f.db.Save(at).Error } +// ApplicationTaskGet returns the ApplicationTask by ApplicationTaskUID func (f *Fish) ApplicationTaskGet(uid types.ApplicationTaskUID) (at *types.ApplicationTask, err error) { at = &types.ApplicationTask{} err = f.db.First(at, uid).Error return at, err } -func (f *Fish) ApplicationTaskListByApplicationAndWhen(appUid types.ApplicationUID, when types.ApplicationStatus) (at []types.ApplicationTask, err error) { - err = f.db.Where(`application_uid = ? AND "when" = ?`, appUid, when).Order("created_at desc").Find(&at).Error +// ApplicationTaskListByApplicationAndWhen returns list of ApplicationTasks by ApplicationUID and When it need to be executed +func (f *Fish) ApplicationTaskListByApplicationAndWhen(appUID types.ApplicationUID, when types.ApplicationStatus) (at []types.ApplicationTask, err error) { + err = f.db.Where(`application_uid = ? 
AND "when" = ?`, appUID, when).Order("created_at desc").Find(&at).Error return at, err } diff --git a/lib/fish/config.go b/lib/fish/config.go index 4af588b..3b17b57 100644 --- a/lib/fish/config.go +++ b/lib/fish/config.go @@ -21,14 +21,15 @@ import ( "github.com/ghodss/yaml" ) +// Config defines Fish node configuration type Config struct { Directory string `json:"directory"` // Where to store database and other useful data (if relative - to CWD) APIAddress string `json:"api_address"` // Where to serve Web UI, API & Meta API ProxySocksAddress string `json:"proxy_socks_address"` // Where to serve SOCKS5 proxy for the allocated resources - ProxySshAddress string `json:"proxy_ssh_address"` // Where to serve SSH proxy for the allocated resources + ProxySSHAddress string `json:"proxy_ssh_address"` // Where to serve SSH proxy for the allocated resources NodeAddress string `json:"node_address"` // What is the external address of the node - CpuLimit uint16 `json:"cpu_limit"` // How many CPU threads Node allowed to use (serve API, ...) + CPULimit uint16 `json:"cpu_limit"` // How many CPU threads Node allowed to use (serve API, ...) 
MemTarget util.HumanSize `json:"mem_target"` // What's the target memory utilization by the Node (GC target where it becomes more aggressive) ClusterJoin []string `json:"cluster_join"` // The node addresses to join the cluster @@ -50,11 +51,13 @@ type Config struct { Drivers []ConfigDriver `json:"drivers"` } +// ConfigDriver helper to store driver config without parsing it right away type ConfigDriver struct { Name string `json:"name"` - Cfg util.UnparsedJson `json:"cfg"` + Cfg util.UnparsedJSON `json:"cfg"` } +// ReadConfigFile needed to read the config file func (c *Config) ReadConfigFile(cfgPath string) error { c.initDefaults() @@ -93,7 +96,7 @@ func (c *Config) initDefaults() { c.Directory = "fish_data" c.APIAddress = "0.0.0.0:8001" c.ProxySocksAddress = "0.0.0.0:1080" - c.ProxySshAddress = "0.0.0.0:2022" + c.ProxySSHAddress = "0.0.0.0:2022" c.NodeAddress = "127.0.0.1:8001" c.TLSKey = "" // Will be set after read config file from NodeName c.TLSCrt = "" // ... diff --git a/lib/fish/drivers.go b/lib/fish/drivers.go index d1ca78a..6938bfb 100644 --- a/lib/fish/drivers.go +++ b/lib/fish/drivers.go @@ -25,12 +25,14 @@ import ( _ "github.com/adobe/aquarium-fish/lib/drivers/native" _ "github.com/adobe/aquarium-fish/lib/drivers/vmx" + // Importing test driver _ "github.com/adobe/aquarium-fish/lib/drivers/test" ) var driversInstances map[string]drivers.ResourceDriver -func (f *Fish) DriverGet(name string) drivers.ResourceDriver { +// driverGet returns specific driver by name +func (*Fish) driverGet(name string) drivers.ResourceDriver { if driversInstances == nil { log.Error("Fish: Resource drivers are not initialized to request the driver instance:", name) return nil @@ -39,8 +41,8 @@ func (f *Fish) DriverGet(name string) drivers.ResourceDriver { return drv } -// Making the drivers instances map with specified names -func (f *Fish) DriversSet() error { +// driversSet making the drivers instances map with specified names +func (f *Fish) driversSet() error { instances := 
make(map[string]drivers.ResourceDriver) if len(f.cfg.Drivers) == 0 { @@ -71,7 +73,8 @@ func (f *Fish) DriversSet() error { return nil } -func (f *Fish) DriversPrepare(configs []ConfigDriver) (errs []error) { +// driversPrepare initializes the drivers with provided configs +func (*Fish) driversPrepare(configs []ConfigDriver) (errs []error) { activatedDriversInstances := make(map[string]drivers.ResourceDriver) for name, drv := range driversInstances { // Looking for the driver config diff --git a/lib/fish/fish.go b/lib/fish/fish.go index e8835d7..4ce469c 100644 --- a/lib/fish/fish.go +++ b/lib/fish/fish.go @@ -34,8 +34,10 @@ import ( "github.com/adobe/aquarium-fish/lib/util" ) +// ElectionRoundTime defines how long the voting round will take in seconds - so cluster nodes will be able to interchange their responses const ElectionRoundTime = 30 +// Fish structure is used to store the node internal state type Fish struct { db *gorm.DB cfg *Config @@ -66,6 +68,7 @@ type Fish struct { nodeUsage types.Resources } +// New creates new Fish node func New(db *gorm.DB, cfg *Config) (*Fish, error) { f := &Fish{db: db, cfg: cfg} if err := f.Init(); err != nil { @@ -75,6 +78,7 @@ func New(db *gorm.DB, cfg *Config) (*Fish, error) { return f, nil } +// Init initializes the Fish node func (f *Fish) Init() error { f.shutdownCancel = make(chan bool) f.Quit = make(chan os.Signal, 1) @@ -121,7 +125,7 @@ func (f *Fish) Init() error { Name: f.cfg.NodeName, } if f.cfg.NodeLocation != "" { - loc, err := f.LocationGetByName(f.cfg.NodeLocation) + loc, err := f.LocationGet(f.cfg.NodeLocation) if err != nil { log.Info("Fish: Creating new location:", f.cfg.NodeLocation) loc.Name = f.cfg.NodeLocation @@ -172,10 +176,10 @@ func (f *Fish) Init() error { // Fish is running now f.running = true - if err := f.DriversSet(); err != nil { + if err := f.driversSet(); err != nil { return log.Error("Fish: Unable to set drivers:", err) } - if errs := f.DriversPrepare(f.cfg.Drivers); errs != nil { + if errs := 
f.driversPrepare(f.cfg.Drivers); errs != nil { log.Error("Fish: Unable to prepare some resource drivers:", errs) } @@ -219,25 +223,29 @@ func (f *Fish) Init() error { return nil } +// Close tells the node that the Fish execution need to be stopped func (f *Fish) Close() { f.running = false } +// GetNodeUID returns node UID func (f *Fish) GetNodeUID() types.ApplicationUID { return f.node.UID } +// GetNode returns Fish node spec func (f *Fish) GetNode() *types.Node { return f.node } -// Creates new UID with 6 starting bytes of Node UID as prefix +// NewUID Creates new UID with 6 starting bytes of Node UID as prefix func (f *Fish) NewUID() uuid.UUID { uid := uuid.New() copy(uid[:], f.node.UID[:6]) return uid } +// GetLocationName returns node location func (f *Fish) GetLocationName() types.LocationName { return f.node.LocationName } @@ -402,7 +410,7 @@ func (f *Fish) voteProcessRound(vote *types.Vote) error { } // Next round seems needed - vote.Round += 1 + vote.Round++ vote.UID = uuid.Nil break } @@ -422,7 +430,7 @@ func (f *Fish) isNodeAvailableForDefinition(def types.LabelDefinition) bool { } // Is node supports the required label driver - driver := f.DriverGet(def.Driver) + driver := f.driverGet(def.Driver) if driver == nil { return false } @@ -518,7 +526,7 @@ func (f *Fish) executeApplication(vote types.Vote) error { } // Locate the required driver - driver := f.DriverGet(labelDef.Driver) + driver := f.driverGet(labelDef.Driver) if driver == nil { f.nodeUsageMutex.Unlock() return fmt.Errorf("Fish: Unable to locate driver for the Application %s: %s", app.UID, labelDef.Driver) @@ -586,7 +594,7 @@ func (f *Fish) executeApplication(vote types.Vote) error { res := &types.Resource{ ApplicationUID: app.UID, NodeUID: f.node.UID, - Metadata: util.UnparsedJson(mergedMetadata), + Metadata: util.UnparsedJSON(mergedMetadata), } if appState.Status == types.ApplicationStatusALLOCATED { res, err = f.ResourceGetByApplication(app.UID) @@ -687,7 +695,7 @@ func (f *Fish) 
executeApplication(vote types.Vote) error { log.Errorf("Fish: Unable to deallocate the Resource of Application: %s (try: %d): %v", app.UID, deallocateRetry, err) // Let's retry to deallocate the resource 10 times before give up if deallocateRetry <= 10 { - deallocateRetry += 1 + deallocateRetry++ time.Sleep(10 * time.Second) continue } @@ -745,7 +753,7 @@ func (f *Fish) executeApplicationTasks(drv drivers.ResourceDriver, def *types.La t := drv.GetTask(task.Task, string(task.Options)) if t == nil { log.Error("Fish: Unable to get associated driver task type for Application:", res.ApplicationUID, task.Task) - task.Result = util.UnparsedJson(`{"error":"task not available in driver"}`) + task.Result = util.UnparsedJSON(`{"error":"task not available in driver"}`) } else { // Executing the task t.SetInfo(&task, def, res) @@ -754,7 +762,7 @@ func (f *Fish) executeApplicationTasks(drv drivers.ResourceDriver, def *types.La // We're not crashing here because even with error task could have a result log.Error("Fish: Error happened during executing the task:", task.UID, err) } - task.Result = util.UnparsedJson(result) + task.Result = util.UnparsedJSON(result) } if err := f.ApplicationTaskSave(&task); err != nil { log.Error("Fish: Error during update the task with result:", task.UID, err) @@ -764,9 +772,9 @@ func (f *Fish) executeApplicationTasks(drv drivers.ResourceDriver, def *types.La return nil } -func (f *Fish) removeFromExecutingApplincations(appUid types.ApplicationUID) { +func (f *Fish) removeFromExecutingApplincations(appUID types.ApplicationUID) { for i, uid := range f.applications { - if uid != appUid { + if uid != appUID { continue } f.applications[i] = f.applications[len(f.applications)-1] @@ -775,25 +783,25 @@ func (f *Fish) removeFromExecutingApplincations(appUid types.ApplicationUID) { } } -func (f *Fish) voteActive(appUid types.ApplicationUID) bool { +func (f *Fish) voteActive(appUID types.ApplicationUID) bool { f.activeVotesMutex.Lock() defer 
f.activeVotesMutex.Unlock() for _, vote := range f.activeVotes { - if vote.ApplicationUID == appUid { + if vote.ApplicationUID == appUID { return true } } return false } -func (f *Fish) voteActiveRemove(voteUid types.VoteUID) { +func (f *Fish) voteActiveRemove(voteUID types.VoteUID) { f.activeVotesMutex.Lock() defer f.activeVotesMutex.Unlock() av := f.activeVotes for i, v := range f.activeVotes { - if v.UID != voteUid { + if v.UID != voteUID { continue } av[i] = av[len(av)-1] @@ -802,7 +810,7 @@ func (f *Fish) voteActiveRemove(voteUid types.VoteUID) { } } -// Set/unset the maintenance mode which will not allow to accept the additional Applications +// MaintenanceSet sets/unsets the maintenance mode which will not allow to accept the additional Applications func (f *Fish) MaintenanceSet(value bool) { if f.maintenance != value { if value { @@ -815,7 +823,7 @@ func (f *Fish) MaintenanceSet(value bool) { f.maintenance = value } -// Tells node it need to execute graceful shutdown operation +// ShutdownSet tells node it need to execute graceful shutdown operation func (f *Fish) ShutdownSet(value bool) { if f.shutdown != value { if value { @@ -829,7 +837,7 @@ func (f *Fish) ShutdownSet(value bool) { f.shutdown = value } -// Set of how much time to wait before executing the node shutdown operation +// ShutdownDelaySet set of how much time to wait before executing the node shutdown operation func (f *Fish) ShutdownDelaySet(delay time.Duration) { if f.shutdownDelay != delay { log.Info("Fish: Shutdown delay is set to:", delay) @@ -846,10 +854,13 @@ func (f *Fish) activateShutdown() { // Running the main shutdown routine go func() { fireShutdown := make(chan bool, 1) - delayTickerReport := &time.Ticker{} - delayTimer := &time.Timer{} + var delayTickerReport *time.Ticker + var delayTimer *time.Timer var delayEndTime time.Time + defer delayTickerReport.Stop() + defer delayTimer.Stop() + for { select { case <-f.shutdownCancel: @@ -860,10 +871,8 @@ func (f *Fish) 
activateShutdown() { // If the delay is set, then running timer to execute shutdown with delay if f.shutdownDelay > 0 { delayEndTime = time.Now().Add(f.shutdownDelay) - delayTickerReport := time.NewTicker(30 * time.Second) - defer delayTickerReport.Stop() + delayTickerReport = time.NewTicker(30 * time.Second) delayTimer = time.NewTimer(f.shutdownDelay) - defer delayTimer.Stop() } else { // No delay is needed, so shutdown now fireShutdown <- true diff --git a/lib/fish/label.go b/lib/fish/label.go index c27b2f6..f469ed0 100644 --- a/lib/fish/label.go +++ b/lib/fish/label.go @@ -21,10 +21,11 @@ import ( "github.com/adobe/aquarium-fish/lib/util" ) +// LabelFind returns list of Labels that fits filter func (f *Fish) LabelFind(filter *string) (labels []types.Label, err error) { db := f.db if filter != nil { - securedFilter, err := util.ExpressionSqlFilter(*filter) + securedFilter, err := util.ExpressionSQLFilter(*filter) if err != nil { log.Warn("Fish: SECURITY: weird SQL filter received:", err) // We do not fail here because we should not give attacker more information @@ -36,6 +37,7 @@ func (f *Fish) LabelFind(filter *string) (labels []types.Label, err error) { return labels, err } +// LabelCreate makes new Label func (f *Fish) LabelCreate(l *types.Label) error { if l.Name == "" { return fmt.Errorf("Fish: Name can't be empty") @@ -72,12 +74,14 @@ func (f *Fish) LabelCreate(l *types.Label) error { return f.db.Save(label).Error }*/ +// LabelGet returns Label by UID func (f *Fish) LabelGet(uid types.LabelUID) (label *types.Label, err error) { label = &types.Label{} err = f.db.First(label, uid).Error return label, err } +// LabelDelete deletes the Label by UID func (f *Fish) LabelDelete(uid types.LabelUID) error { return f.db.Delete(&types.Label{}, uid).Error } diff --git a/lib/fish/location.go b/lib/fish/location.go index 03adc2d..2b3f6bb 100644 --- a/lib/fish/location.go +++ b/lib/fish/location.go @@ -20,10 +20,11 @@ import ( "github.com/adobe/aquarium-fish/lib/util" ) 
+// LocationFind returns list of Locations fits filter func (f *Fish) LocationFind(filter *string) (ls []types.Location, err error) { db := f.db if filter != nil { - securedFilter, err := util.ExpressionSqlFilter(*filter) + securedFilter, err := util.ExpressionSQLFilter(*filter) if err != nil { log.Warn("Fish: SECURITY: weird SQL filter received:", err) // We do not fail here because we should not give attacker more information @@ -35,6 +36,7 @@ func (f *Fish) LocationFind(filter *string) (ls []types.Location, err error) { return ls, err } +// LocationCreate makes new Location func (f *Fish) LocationCreate(l *types.Location) error { if l.Name == "" { return fmt.Errorf("Fish: Name can't be empty") @@ -43,22 +45,19 @@ func (f *Fish) LocationCreate(l *types.Location) error { return f.db.Create(l).Error } +// LocationSave stores the Location func (f *Fish) LocationSave(l *types.Location) error { return f.db.Save(l).Error } +// LocationGet returns Location by it's unique name func (f *Fish) LocationGet(name types.LocationName) (l *types.Location, err error) { l = &types.Location{} err = f.db.First(l, name).Error return l, err } -func (f *Fish) LocationGetByName(name string) (l *types.Location, err error) { - l = &types.Location{} - err = f.db.Where("name = ?", name).First(l).Error - return l, err -} - +// LocationDelete removes location func (f *Fish) LocationDelete(name types.LocationName) error { return f.db.Delete(&types.Location{}, name).Error } diff --git a/lib/fish/node.go b/lib/fish/node.go index 4ab9862..226bf88 100644 --- a/lib/fish/node.go +++ b/lib/fish/node.go @@ -24,10 +24,11 @@ import ( "github.com/adobe/aquarium-fish/lib/util" ) +// NodeFind returns list of Nodes that fits filter func (f *Fish) NodeFind(filter *string) (ns []types.Node, err error) { db := f.db if filter != nil { - securedFilter, err := util.ExpressionSqlFilter(*filter) + securedFilter, err := util.ExpressionSQLFilter(*filter) if err != nil { log.Warn("Fish: SECURITY: weird SQL filter 
received:", err) // We do not fail here because we should not give attacker more information @@ -39,6 +40,14 @@ func (f *Fish) NodeFind(filter *string) (ns []types.Node, err error) { return ns, err } +// NodeGet returns Node by it's unique name +func (f *Fish) NodeGet(name string) (node *types.Node, err error) { + node = &types.Node{} + err = f.db.Where("name = ?", name).First(node).Error + return node, err +} + +// NodeActiveList lists all the nodes in the cluster func (f *Fish) NodeActiveList() (ns []types.Node, err error) { // Only the nodes that pinged at least twice the delay time t := time.Now().Add(-types.NodePingDelay * 2 * time.Second) @@ -46,6 +55,7 @@ func (f *Fish) NodeActiveList() (ns []types.Node, err error) { return ns, err } +// NodeCreate makes new Node func (f *Fish) NodeCreate(n *types.Node) error { if n.Name == "" { return fmt.Errorf("Fish: Name can't be empty") @@ -61,20 +71,16 @@ func (f *Fish) NodeCreate(n *types.Node) error { return f.db.Create(n).Error } +// NodeSave stores Node func (f *Fish) NodeSave(node *types.Node) error { return f.db.Save(node).Error } +// NodePing updates Node and shows that it's active func (f *Fish) NodePing(node *types.Node) error { return f.db.Model(node).Update("name", node.Name).Error } -func (f *Fish) NodeGet(name string) (node *types.Node, err error) { - node = &types.Node{} - err = f.db.Where("name = ?", name).First(node).Error - return node, err -} - func (f *Fish) pingProcess() { // In order to optimize network & database - update just UpdatedAt field pingTicker := time.NewTicker(types.NodePingDelay * time.Second) diff --git a/lib/fish/resource.go b/lib/fish/resource.go index c7a20d5..dc9221c 100644 --- a/lib/fish/resource.go +++ b/lib/fish/resource.go @@ -26,10 +26,11 @@ import ( "github.com/adobe/aquarium-fish/lib/util" ) +// ResourceFind lists Resources that fits filter func (f *Fish) ResourceFind(filter *string) (rs []types.Resource, err error) { db := f.db if filter != nil { - securedFilter, err := 
util.ExpressionSqlFilter(*filter) + securedFilter, err := util.ExpressionSQLFilter(*filter) if err != nil { log.Warn("Fish: SECURITY: weird SQL filter received:", err) // We do not fail here because we should not give attacker more information @@ -41,11 +42,13 @@ func (f *Fish) ResourceFind(filter *string) (rs []types.Resource, err error) { return rs, err } -func (f *Fish) ResourceListNode(nodeUid types.NodeUID) (rs []types.Resource, err error) { - err = f.db.Where("node_uid = ?", nodeUid).Find(&rs).Error +// ResourceListNode returns list of resources for provided NodeUID +func (f *Fish) ResourceListNode(nodeUID types.NodeUID) (rs []types.Resource, err error) { + err = f.db.Where("node_uid = ?", nodeUID).Find(&rs).Error return rs, err } +// ResourceCreate makes new Resource func (f *Fish) ResourceCreate(r *types.Resource) error { if r.ApplicationUID == uuid.Nil { return fmt.Errorf("Fish: ApplicationUID can't be unset") @@ -68,6 +71,7 @@ func (f *Fish) ResourceCreate(r *types.Resource) error { return f.db.Create(r).Error } +// ResourceDelete removes Resource func (f *Fish) ResourceDelete(uid types.ResourceUID) error { // First delete any references to this resource. 
err := f.ResourceAccessDeleteByResource(uid) @@ -78,10 +82,12 @@ func (f *Fish) ResourceDelete(uid types.ResourceUID) error { return f.db.Delete(&types.Resource{}, uid).Error } +// ResourceSave stores Resource func (f *Fish) ResourceSave(res *types.Resource) error { return f.db.Save(res).Error } +// ResourceGet returns Resource by it's UID func (f *Fish) ResourceGet(uid types.ResourceUID) (res *types.Resource, err error) { res = &types.Resource{} err = f.db.First(res, uid).Error @@ -135,17 +141,15 @@ func isControlledNetwork(ip string) bool { } for _, a := range addrs { - switch v := a.(type) { - case *net.IPNet: - if checkIPv4Address(v, ipParsed) { - return true - } + if v, ok := a.(*net.IPNet); ok && checkIPv4Address(v, ipParsed) { + return true } } } return false } +// ResourceGetByIP returns Resource by it's IP address func (f *Fish) ResourceGetByIP(ip string) (res *types.Resource, err error) { res = &types.Resource{} @@ -189,21 +193,20 @@ func (f *Fish) ResourceGetByIP(ip string) (res *types.Resource, err error) { return res, err } -func (f *Fish) ResourceGetByApplication(appUid types.ApplicationUID) (res *types.Resource, err error) { +// ResourceGetByApplication returns Resource by ApplicationUID +func (f *Fish) ResourceGetByApplication(appUID types.ApplicationUID) (res *types.Resource, err error) { res = &types.Resource{} - err = f.db.Where("application_uid = ?", appUid).First(res).Error + err = f.db.Where("application_uid = ?", appUID).First(res).Error return res, err } -func (f *Fish) ResourceServiceMapping(res *types.Resource, dest string) string { +// ResourceServiceMappingByApplicationAndDest is trying to find the ResourceServiceMapping record with Application and Location if possible. +// The application in priority, location - secondary priority, if no such records found - default will be used. 
+func (f *Fish) ResourceServiceMappingByApplicationAndDest(appUID types.ApplicationUID, dest string) string { sm := &types.ServiceMapping{} - // TODO: rewrite to uid system - // Trying to find the record with Application and Location if possible - // The application in priority, location - secondary priority, if no such - // records found - default will be used err := f.db.Where( - "application_uid = ?", res.ApplicationUID).Where( + "application_uid = ?", appUID).Where( "location_uid = ?", f.GetLocationName()).Where( "service = ?", dest).Order("application_uid DESC").Order("location_uid DESC").First(sm).Error if err != nil { diff --git a/lib/fish/resource_access.go b/lib/fish/resource_access.go index a5ad39e..b1a264e 100644 --- a/lib/fish/resource_access.go +++ b/lib/fish/resource_access.go @@ -20,6 +20,7 @@ import ( "github.com/adobe/aquarium-fish/lib/openapi/types" ) +// ResourceAccessCreate makes new ResourceAccess func (f *Fish) ResourceAccessCreate(r *types.ResourceAccess) error { if r.ResourceUID == uuid.Nil { return fmt.Errorf("Fish: ResourceUID can't be nil") @@ -36,16 +37,18 @@ func (f *Fish) ResourceAccessCreate(r *types.ResourceAccess) error { return f.db.Create(r).Error } -func (f *Fish) ResourceAccessDeleteByResource(resourceUid types.ResourceUID) error { - ra := types.ResourceAccess{ResourceUID: resourceUid} +// ResourceAccessDeleteByResource removes ResourceAccess by ResourceUID +func (f *Fish) ResourceAccessDeleteByResource(resourceUID types.ResourceUID) error { + ra := types.ResourceAccess{ResourceUID: resourceUID} return f.db.Where(&ra).Delete(&ra).Error } +// ResourceAccessDelete removes ResourceAccess by UID func (f *Fish) ResourceAccessDelete(uid types.ResourceAccessUID) error { return f.db.Delete(&types.ResourceAccess{}, uid).Error } -// Retrieves the password from the database *AND* deletes it. Users must +// ResourceAccessSingleUsePassword retrieves the password from the database *AND* deletes it. 
Users must // issue another curl call to request a new access password. func (f *Fish) ResourceAccessSingleUsePassword(username string, password string) (ra *types.ResourceAccess, err error) { ra = &types.ResourceAccess{} diff --git a/lib/fish/servicemapping.go b/lib/fish/servicemapping.go index 63221b1..5541b27 100644 --- a/lib/fish/servicemapping.go +++ b/lib/fish/servicemapping.go @@ -20,10 +20,11 @@ import ( "github.com/adobe/aquarium-fish/lib/util" ) +// ServiceMappingFind returns list of ServiceMappings that fits the filter func (f *Fish) ServiceMappingFind(filter *string) (sms []types.ServiceMapping, err error) { db := f.db if filter != nil { - securedFilter, err := util.ExpressionSqlFilter(*filter) + securedFilter, err := util.ExpressionSQLFilter(*filter) if err != nil { log.Warn("Fish: SECURITY: weird SQL filter received:", err) // We do not fail here because we should not give attacker more information @@ -35,6 +36,7 @@ func (f *Fish) ServiceMappingFind(filter *string) (sms []types.ServiceMapping, e return sms, err } +// ServiceMappingCreate makes new ServiceMapping func (f *Fish) ServiceMappingCreate(sm *types.ServiceMapping) error { if sm.Service == "" { return fmt.Errorf("Fish: Service can't be empty") @@ -47,16 +49,19 @@ func (f *Fish) ServiceMappingCreate(sm *types.ServiceMapping) error { return f.db.Create(sm).Error } +// ServiceMappingSave stores ServiceMapping func (f *Fish) ServiceMappingSave(sm *types.ServiceMapping) error { return f.db.Save(sm).Error } +// ServiceMappingGet returns ServiceMapping by UID func (f *Fish) ServiceMappingGet(uid types.ServiceMappingUID) (sm *types.ServiceMapping, err error) { sm = &types.ServiceMapping{} err = f.db.First(sm, uid).Error return sm, err } +// ServiceMappingDelete removes ServiceMapping func (f *Fish) ServiceMappingDelete(uid types.ServiceMappingUID) error { return f.db.Delete(&types.ServiceMapping{}, uid).Error } diff --git a/lib/fish/user.go b/lib/fish/user.go index 771f023..eca83ed 100644 --- 
a/lib/fish/user.go +++ b/lib/fish/user.go @@ -21,10 +21,11 @@ import ( "github.com/adobe/aquarium-fish/lib/util" ) +// UserFind returns list of users that fits the filter func (f *Fish) UserFind(filter *string) (us []types.User, err error) { db := f.db if filter != nil { - securedFilter, err := util.ExpressionSqlFilter(*filter) + securedFilter, err := util.ExpressionSQLFilter(*filter) if err != nil { log.Warn("Fish: SECURITY: weird SQL filter received:", err) // We do not fail here because we should not give attacker more information @@ -36,6 +37,7 @@ func (f *Fish) UserFind(filter *string) (us []types.User, err error) { return us, err } +// UserCreate makes new User func (f *Fish) UserCreate(u *types.User) error { if u.Name == "" { return fmt.Errorf("Fish: Name can't be empty") @@ -47,16 +49,19 @@ func (f *Fish) UserCreate(u *types.User) error { return f.db.Create(u).Error } +// UserSave stores User func (f *Fish) UserSave(u *types.User) error { return f.db.Save(u).Error } +// UserGet returns User by unique name func (f *Fish) UserGet(name string) (u *types.User, err error) { u = &types.User{} err = f.db.Where("name = ?", name).First(u).Error return u, err } +// UserAuth returns User if name and password are correct func (f *Fish) UserAuth(name string, password string) *types.User { // TODO: Make auth process to take constant time in case of failure user, err := f.UserGet(name) @@ -77,6 +82,7 @@ func (f *Fish) UserAuth(name string, password string) *types.User { return user } +// UserNew makes new User func (f *Fish) UserNew(name string, password string) (string, *types.User, error) { if password == "" { password = crypt.RandString(64) @@ -94,6 +100,7 @@ func (f *Fish) UserNew(name string, password string) (string, *types.User, error return password, user, nil } +// UserDelete removes User func (f *Fish) UserDelete(name string) error { return f.db.Where("name = ?", name).Delete(&types.User{}).Error } diff --git a/lib/fish/vote.go b/lib/fish/vote.go index 
ad2223f..00732dc 100644 --- a/lib/fish/vote.go +++ b/lib/fish/vote.go @@ -23,10 +23,11 @@ import ( "github.com/adobe/aquarium-fish/lib/util" ) +// VoteFind returns list of Votes that fits filter func (f *Fish) VoteFind(filter *string) (vs []types.Vote, err error) { db := f.db if filter != nil { - securedFilter, err := util.ExpressionSqlFilter(*filter) + securedFilter, err := util.ExpressionSQLFilter(*filter) if err != nil { log.Warn("Fish: SECURITY: weird SQL filter received:", err) // We do not fail here because we should not give attacker more information @@ -38,6 +39,7 @@ func (f *Fish) VoteFind(filter *string) (vs []types.Vote, err error) { return vs, err } +// VoteCreate makes new Vote func (f *Fish) VoteCreate(v *types.Vote) error { if v.ApplicationUID == uuid.Nil { return fmt.Errorf("Fish: ApplicationUID can't be unset") @@ -56,33 +58,38 @@ func (f *Fish) VoteCreate(v *types.Vote) error { return f.db.Save(v).Error }*/ +// VoteGet returns Vote by it's UID func (f *Fish) VoteGet(uid types.VoteUID) (v *types.Vote, err error) { v = &types.Vote{} err = f.db.First(v, uid).Error return v, err } -func (f *Fish) VoteCurrentRoundGet(appUid types.ApplicationUID) uint16 { +// VoteCurrentRoundGet returns the current round of voting based on the known Votes +func (f *Fish) VoteCurrentRoundGet(appUID types.ApplicationUID) uint16 { var result types.Vote - f.db.Select("max(round) as round").Where("application_uid = ?", appUid).First(&result) + f.db.Select("max(round) as round").Where("application_uid = ?", appUID).First(&result) return result.Round } -func (f *Fish) VoteListGetApplicationRound(appUid types.ApplicationUID, round uint16) (vs []types.Vote, err error) { - err = f.db.Where("application_uid = ?", appUid).Where("round = ?", round).Find(&vs).Error +// VoteListGetApplicationRound returns Votes for the specified round +func (f *Fish) VoteListGetApplicationRound(appUID types.ApplicationUID, round uint16) (vs []types.Vote, err error) { + err = 
f.db.Where("application_uid = ?", appUID).Where("round = ?", round).Find(&vs).Error return vs, err } -func (f *Fish) VoteGetElectionWinner(appUid types.ApplicationUID, round uint16) (v *types.Vote, err error) { - // Current rule is simple - sort everyone answered smallest available number and the first one wins +// VoteGetElectionWinner returns Vote that won the election +func (f *Fish) VoteGetElectionWinner(appUID types.ApplicationUID, round uint16) (v *types.Vote, err error) { + // Current rule is simple - sort everyone answered the smallest available number and the first one wins v = &types.Vote{} - err = f.db.Where("application_uid = ?", appUid).Where("round = ?", round).Where("available >= 0"). + err = f.db.Where("application_uid = ?", appUID).Where("round = ?", round).Where("available >= 0"). Order("available ASC").Order("created_at ASC").Order("rand ASC").First(&v).Error return v, err } -func (f *Fish) VoteGetNodeApplication(nodeUid types.NodeUID, appUid types.ApplicationUID) (v *types.Vote, err error) { +// VoteGetNodeApplication returns latest Vote by Node and Application +func (f *Fish) VoteGetNodeApplication(nodeUID types.NodeUID, appUID types.ApplicationUID) (v *types.Vote, err error) { v = &types.Vote{} - err = f.db.Where("application_uid = ?", appUid).Where("node_uid = ?", nodeUid).Order("round DESC").First(&v).Error + err = f.db.Where("application_uid = ?", appUID).Where("node_uid = ?", nodeUID).Order("round DESC").First(&v).Error return v, err } diff --git a/lib/log/log.go b/lib/log/log.go index 2e51071..0e28a4c 100644 --- a/lib/log/log.go +++ b/lib/log/log.go @@ -10,6 +10,7 @@ * governing permissions and limitations under the License. 
*/ +// Package log provides logging for Fish executable package log import ( @@ -19,13 +20,16 @@ import ( ) var ( - UseTimestamp bool = true - Verbosity int8 = 2 + // UseTimestamp needed if you don't want to output timestamp in the logging message + // for example that's helpful in case your service journal already contains timestamps + UseTimestamp = true - DebugLogger *log.Logger - InfoLogger *log.Logger - WarnLogger *log.Logger - ErrorLogger *log.Logger + verbosity int8 = 2 + + debugLogger *log.Logger + infoLogger *log.Logger + warnLogger *log.Logger + errorLogger *log.Logger ) func init() { @@ -33,16 +37,17 @@ func init() { InitLoggers() } +// SetVerbosity defines verbosity of the logger func SetVerbosity(level string) error { switch level { case "debug": - Verbosity = 1 + verbosity = 1 case "info": - Verbosity = 2 + verbosity = 2 case "warn": - Verbosity = 3 + verbosity = 3 case "error": - Verbosity = 4 + verbosity = 4 default: return fmt.Errorf("Unable to parse verbosity level: %s", level) } @@ -50,80 +55,100 @@ func SetVerbosity(level string) error { return nil } +// GetVerbosity returns current verbosity level +func GetVerbosity() int8 { + return verbosity +} + +// InitLoggers initializes the loggers func InitLoggers() error { flags := log.Lmsgprefix // Skip timestamp if not needed if UseTimestamp { flags |= log.Ldate | log.Ltime - if Verbosity < 2 { + if verbosity < 2 { flags |= log.Lmicroseconds } } // Show short file for debug verbosity - if Verbosity < 2 { + if verbosity < 2 { flags |= log.Lshortfile } - DebugLogger = log.New(os.Stdout, "DEBUG:\t", flags) - InfoLogger = log.New(os.Stdout, "INFO:\t", flags) - WarnLogger = log.New(os.Stdout, "WARN:\t", flags) - ErrorLogger = log.New(os.Stdout, "ERROR:\t", flags) + debugLogger = log.New(os.Stdout, "DEBUG:\t", flags) + infoLogger = log.New(os.Stdout, "INFO:\t", flags) + warnLogger = log.New(os.Stdout, "WARN:\t", flags) + errorLogger = log.New(os.Stdout, "ERROR:\t", flags) return nil } +// GetInfoLogger 
returns Info logger func GetInfoLogger() *log.Logger { - return InfoLogger + return infoLogger +} + +// GetErrorLogger returns Error logger +func GetErrorLogger() *log.Logger { + return errorLogger } +// Debug logs debug message func Debug(v ...any) { - if Verbosity <= 1 { - DebugLogger.Output(2, fmt.Sprintln(v...)) + if verbosity <= 1 { + debugLogger.Output(2, fmt.Sprintln(v...)) } } +// Debugf logs debug message with formatting func Debugf(format string, v ...any) { - if Verbosity <= 1 { - DebugLogger.Output(2, fmt.Sprintf(format+"\n", v...)) + if verbosity <= 1 { + debugLogger.Output(2, fmt.Sprintf(format+"\n", v...)) } } +// Info logs info message func Info(v ...any) { - if Verbosity <= 2 { - InfoLogger.Output(2, fmt.Sprintln(v...)) + if verbosity <= 2 { + infoLogger.Output(2, fmt.Sprintln(v...)) } } +// Infof logs info message with formatting func Infof(format string, v ...any) { - if Verbosity <= 2 { - InfoLogger.Output(2, fmt.Sprintf(format+"\n", v...)) + if verbosity <= 2 { + infoLogger.Output(2, fmt.Sprintf(format+"\n", v...)) } } +// Warn logs warning message func Warn(v ...any) { - if Verbosity <= 3 { - WarnLogger.Output(2, fmt.Sprintln(v...)) + if verbosity <= 3 { + warnLogger.Output(2, fmt.Sprintln(v...)) } } +// Warnf logs warning message with formatting func Warnf(format string, v ...any) { - if Verbosity <= 3 { - WarnLogger.Output(2, fmt.Sprintf(format+"\n", v...)) + if verbosity <= 3 { + warnLogger.Output(2, fmt.Sprintf(format+"\n", v...)) } } +// Error logs error message func Error(v ...any) error { msg := fmt.Sprintln(v...) - if Verbosity <= 4 { - ErrorLogger.Output(2, msg) + if verbosity <= 4 { + errorLogger.Output(2, msg) } return fmt.Errorf("%s", msg) } +// Errorf logs error message with formatting func Errorf(format string, v ...any) error { - if Verbosity <= 4 { - ErrorLogger.Output(2, fmt.Sprintf(format+"\n", v...)) + if verbosity <= 4 { + errorLogger.Output(2, fmt.Sprintf(format+"\n", v...)) } return fmt.Errorf(format, v...) 
} diff --git a/lib/openapi/api/api_v1.go b/lib/openapi/api/api_v1.go index bdae940..7f35dce 100644 --- a/lib/openapi/api/api_v1.go +++ b/lib/openapi/api/api_v1.go @@ -10,6 +10,7 @@ * governing permissions and limitations under the License. */ +// Package api is an API definition package api import ( @@ -31,12 +32,14 @@ import ( // H is a shortcut for map[string]any type H map[string]any +// Processor doing processing of the API request type Processor struct { fish *fish.Fish } -func NewV1Router(e *echo.Echo, fish *fish.Fish) { - proc := &Processor{fish: fish} +// NewV1Router creates router for APIv1 +func NewV1Router(e *echo.Echo, f *fish.Fish) { + proc := &Processor{fish: f} router := e.Group("") router.Use( // Regular basic auth @@ -47,6 +50,7 @@ func NewV1Router(e *echo.Echo, fish *fish.Fish) { RegisterHandlers(router, proc) } +// BasicAuth middleware to ensure API will not be used by crocodile func (e *Processor) BasicAuth(username, password string, c echo.Context) (bool, error) { c.Set("uid", crypt.RandString(8)) log.Debugf("API: %s: New request received: %s %s", username, c.Get("uid"), c.Path()) @@ -60,7 +64,8 @@ func (e *Processor) BasicAuth(username, password string, c echo.Context) (bool, return user != nil, nil } -func (e *Processor) UserMeGet(c echo.Context) error { +// UserMeGet API call processor +func (*Processor) UserMeGet(c echo.Context) error { user, ok := c.Get("user").(*types.User) if !ok { c.JSON(http.StatusBadRequest, H{"message": "Not authentified"}) @@ -71,6 +76,7 @@ func (e *Processor) UserMeGet(c echo.Context) error { return c.JSON(http.StatusOK, user) } +// UserListGet API call processor func (e *Processor) UserListGet(c echo.Context, params types.UserListGetParams) error { // Only admin can list users user, ok := c.Get("user").(*types.User) @@ -92,6 +98,7 @@ func (e *Processor) UserListGet(c echo.Context, params types.UserListGetParams) return c.JSON(http.StatusOK, out) } +// UserGet API call processor func (e *Processor) UserGet(c 
echo.Context, name string) error { user, ok := c.Get("user").(*types.User) if !ok { @@ -112,6 +119,7 @@ func (e *Processor) UserGet(c echo.Context, name string) error { return c.JSON(http.StatusOK, out) } +// UserCreateUpdatePost API call processor func (e *Processor) UserCreateUpdatePost(c echo.Context) error { // Only admin can create user, or user can update itself var data types.UserAPIPassword @@ -161,6 +169,7 @@ func (e *Processor) UserCreateUpdatePost(c echo.Context) error { return c.JSON(http.StatusOK, data) } +// UserDelete API call processor func (e *Processor) UserDelete(c echo.Context, name string) error { // Only admin can delete user user, ok := c.Get("user").(*types.User) @@ -181,6 +190,7 @@ func (e *Processor) UserDelete(c echo.Context, name string) error { return c.JSON(http.StatusOK, H{"message": "User removed"}) } +// ResourceListGet API call processor func (e *Processor) ResourceListGet(c echo.Context, params types.ResourceListGetParams) error { // Only admin can list the resources user, ok := c.Get("user").(*types.User) @@ -202,6 +212,7 @@ func (e *Processor) ResourceListGet(c echo.Context, params types.ResourceListGet return c.JSON(http.StatusOK, out) } +// ResourceGet API call processor func (e *Processor) ResourceGet(c echo.Context, uid types.ResourceUID) error { // Only admin can get the resource directly user, ok := c.Get("user").(*types.User) @@ -223,6 +234,7 @@ func (e *Processor) ResourceGet(c echo.Context, uid types.ResourceUID) error { return c.JSON(http.StatusOK, out) } +// ResourceAccessPut API call processor func (e *Processor) ResourceAccessPut(c echo.Context, uid types.ResourceUID) error { user, ok := c.Get("user").(*types.User) if !ok { @@ -257,6 +269,7 @@ func (e *Processor) ResourceAccessPut(c echo.Context, uid types.ResourceUID) err return c.JSON(http.StatusOK, rAccess) } +// ApplicationListGet API call processor func (e *Processor) ApplicationListGet(c echo.Context, params types.ApplicationListGetParams) error { out, err := 
e.fish.ApplicationFind(params.Filter) if err != nil { @@ -283,6 +296,7 @@ func (e *Processor) ApplicationListGet(c echo.Context, params types.ApplicationL return c.JSON(http.StatusOK, out) } +// ApplicationGet API call processor func (e *Processor) ApplicationGet(c echo.Context, uid types.ApplicationUID) error { app, err := e.fish.ApplicationGet(uid) if err != nil { @@ -304,6 +318,7 @@ func (e *Processor) ApplicationGet(c echo.Context, uid types.ApplicationUID) err return c.JSON(http.StatusOK, app) } +// ApplicationCreatePost API call processor func (e *Processor) ApplicationCreatePost(c echo.Context) error { var data types.Application if err := c.Bind(&data); err != nil { @@ -327,6 +342,7 @@ func (e *Processor) ApplicationCreatePost(c echo.Context) error { return c.JSON(http.StatusOK, data) } +// ApplicationResourceGet API call processor func (e *Processor) ApplicationResourceGet(c echo.Context, uid types.ApplicationUID) error { app, err := e.fish.ApplicationGet(uid) if err != nil { @@ -354,6 +370,7 @@ func (e *Processor) ApplicationResourceGet(c echo.Context, uid types.Application return c.JSON(http.StatusOK, out) } +// ApplicationStateGet API call processor func (e *Processor) ApplicationStateGet(c echo.Context, uid types.ApplicationUID) error { app, err := e.fish.ApplicationGet(uid) if err != nil { @@ -381,11 +398,12 @@ func (e *Processor) ApplicationStateGet(c echo.Context, uid types.ApplicationUID return c.JSON(http.StatusOK, out) } -func (e *Processor) ApplicationTaskListGet(c echo.Context, appUid types.ApplicationUID, params types.ApplicationTaskListGetParams) error { - app, err := e.fish.ApplicationGet(appUid) +// ApplicationTaskListGet API call processor +func (e *Processor) ApplicationTaskListGet(c echo.Context, appUID types.ApplicationUID, params types.ApplicationTaskListGetParams) error { + app, err := e.fish.ApplicationGet(appUID) if err != nil { - c.JSON(http.StatusBadRequest, H{"message": fmt.Sprintf("Unable to find the Application: %s", appUid)}) - 
return fmt.Errorf("Unable to find the Application: %s, %w", appUid, err) + c.JSON(http.StatusBadRequest, H{"message": fmt.Sprintf("Unable to find the Application: %s", appUID)}) + return fmt.Errorf("Unable to find the Application: %s, %w", appUID, err) } // Only the owner of the application (or admin) could get the tasks @@ -399,7 +417,7 @@ func (e *Processor) ApplicationTaskListGet(c echo.Context, appUid types.Applicat return fmt.Errorf("Only the owner of Application & admin can get the Application Tasks") } - out, err := e.fish.ApplicationTaskFindByApplication(appUid, params.Filter) + out, err := e.fish.ApplicationTaskFindByApplication(appUID, params.Filter) if err != nil { c.JSON(http.StatusInternalServerError, H{"message": fmt.Sprintf("Unable to get the Application Tasks list: %v", err)}) return fmt.Errorf("Unable to get the Application Tasks list: %w", err) @@ -408,11 +426,12 @@ func (e *Processor) ApplicationTaskListGet(c echo.Context, appUid types.Applicat return c.JSON(http.StatusOK, out) } -func (e *Processor) ApplicationTaskCreatePost(c echo.Context, appUid types.ApplicationUID) error { - app, err := e.fish.ApplicationGet(appUid) +// ApplicationTaskCreatePost API call processor +func (e *Processor) ApplicationTaskCreatePost(c echo.Context, appUID types.ApplicationUID) error { + app, err := e.fish.ApplicationGet(appUID) if err != nil { - c.JSON(http.StatusBadRequest, H{"message": fmt.Sprintf("Unable to find the Application: %s", appUid)}) - return fmt.Errorf("Unable to find the Application: %s, %w", appUid, err) + c.JSON(http.StatusBadRequest, H{"message": fmt.Sprintf("Unable to find the Application: %s", appUID)}) + return fmt.Errorf("Unable to find the Application: %s, %w", appUID, err) } // Only the owner of the application (or admin) could create the tasks @@ -433,7 +452,7 @@ func (e *Processor) ApplicationTaskCreatePost(c echo.Context, appUid types.Appli } // Set Application UID for the task forcefully to not allow creating tasks for the other Apps - 
data.ApplicationUID = appUid + data.ApplicationUID = appUID if err := e.fish.ApplicationTaskCreate(&data); err != nil { c.JSON(http.StatusBadRequest, H{"message": fmt.Sprintf("Unable to create ApplicationTask: %v", err)}) @@ -443,11 +462,12 @@ func (e *Processor) ApplicationTaskCreatePost(c echo.Context, appUid types.Appli return c.JSON(http.StatusOK, data) } -func (e *Processor) ApplicationTaskGet(c echo.Context, taskUid types.ApplicationTaskUID) error { - task, err := e.fish.ApplicationTaskGet(taskUid) +// ApplicationTaskGet API call processor +func (e *Processor) ApplicationTaskGet(c echo.Context, taskUID types.ApplicationTaskUID) error { + task, err := e.fish.ApplicationTaskGet(taskUID) if err != nil { - c.JSON(http.StatusBadRequest, H{"message": fmt.Sprintf("Unable to find the Application: %s", taskUid)}) - return fmt.Errorf("Unable to find the ApplicationTask: %s, %w", taskUid, err) + c.JSON(http.StatusBadRequest, H{"message": fmt.Sprintf("Unable to find the Application: %s", taskUID)}) + return fmt.Errorf("Unable to find the ApplicationTask: %s, %w", taskUID, err) } app, err := e.fish.ApplicationGet(task.ApplicationUID) @@ -470,6 +490,7 @@ func (e *Processor) ApplicationTaskGet(c echo.Context, taskUid types.Application return c.JSON(http.StatusOK, task) } +// ApplicationDeallocateGet API call processor func (e *Processor) ApplicationDeallocateGet(c echo.Context, uid types.ApplicationUID) error { app, err := e.fish.ApplicationGet(uid) if err != nil { @@ -515,6 +536,7 @@ func (e *Processor) ApplicationDeallocateGet(c echo.Context, uid types.Applicati return c.JSON(http.StatusOK, as) } +// LabelListGet API call processor func (e *Processor) LabelListGet(c echo.Context, params types.LabelListGetParams) error { out, err := e.fish.LabelFind(params.Filter) if err != nil { @@ -525,6 +547,7 @@ func (e *Processor) LabelListGet(c echo.Context, params types.LabelListGetParams return c.JSON(http.StatusOK, out) } +// LabelGet API call processor func (e *Processor) 
LabelGet(c echo.Context, uid types.LabelUID) error { out, err := e.fish.LabelGet(uid) if err != nil { @@ -535,6 +558,7 @@ func (e *Processor) LabelGet(c echo.Context, uid types.LabelUID) error { return c.JSON(http.StatusOK, out) } +// LabelCreatePost API call processor func (e *Processor) LabelCreatePost(c echo.Context) error { // Only admin can create label user, ok := c.Get("user").(*types.User) @@ -560,6 +584,7 @@ func (e *Processor) LabelCreatePost(c echo.Context) error { return c.JSON(http.StatusOK, data) } +// LabelDelete API call processor func (e *Processor) LabelDelete(c echo.Context, uid types.LabelUID) error { // Only admin can delete label user, ok := c.Get("user").(*types.User) @@ -581,6 +606,7 @@ func (e *Processor) LabelDelete(c echo.Context, uid types.LabelUID) error { return c.JSON(http.StatusOK, H{"message": "Label removed"}) } +// NodeListGet API call processor func (e *Processor) NodeListGet(c echo.Context, params types.NodeListGetParams) error { out, err := e.fish.NodeFind(params.Filter) if err != nil { @@ -591,12 +617,14 @@ func (e *Processor) NodeListGet(c echo.Context, params types.NodeListGetParams) return c.JSON(http.StatusOK, out) } +// NodeThisGet API call processor func (e *Processor) NodeThisGet(c echo.Context) error { node := e.fish.GetNode() return c.JSON(http.StatusOK, node) } +// NodeThisMaintenanceGet API call processor func (e *Processor) NodeThisMaintenanceGet(c echo.Context, params types.NodeThisMaintenanceGetParams) error { user, ok := c.Get("user").(*types.User) if !ok { @@ -633,11 +661,13 @@ func (e *Processor) NodeThisMaintenanceGet(c echo.Context, params types.NodeThis return c.JSON(http.StatusOK, params) } +// NodeThisProfilingIndexGet API call processor func (e *Processor) NodeThisProfilingIndexGet(c echo.Context) error { return e.NodeThisProfilingGet(c, "") } -func (e *Processor) NodeThisProfilingGet(c echo.Context, handler string) error { +// NodeThisProfilingGet API call processor +func (*Processor) 
NodeThisProfilingGet(c echo.Context, handler string) error { user, ok := c.Get("user").(*types.User) if !ok { c.JSON(http.StatusBadRequest, H{"message": "Not authentified"}) @@ -671,6 +701,7 @@ func (e *Processor) NodeThisProfilingGet(c echo.Context, handler string) error { return nil } +// VoteListGet API call processor func (e *Processor) VoteListGet(c echo.Context, params types.VoteListGetParams) error { user, ok := c.Get("user").(*types.User) if !ok { @@ -691,6 +722,7 @@ func (e *Processor) VoteListGet(c echo.Context, params types.VoteListGetParams) return c.JSON(http.StatusOK, out) } +// LocationListGet API call processor func (e *Processor) LocationListGet(c echo.Context, params types.LocationListGetParams) error { user, ok := c.Get("user").(*types.User) if !ok { @@ -711,6 +743,7 @@ func (e *Processor) LocationListGet(c echo.Context, params types.LocationListGet return c.JSON(http.StatusOK, out) } +// LocationCreatePost API call processor func (e *Processor) LocationCreatePost(c echo.Context) error { user, ok := c.Get("user").(*types.User) if !ok { @@ -736,6 +769,7 @@ func (e *Processor) LocationCreatePost(c echo.Context) error { return c.JSON(http.StatusOK, data) } +// ServiceMappingGet API call processor func (e *Processor) ServiceMappingGet(c echo.Context, uid types.ServiceMappingUID) error { user, ok := c.Get("user").(*types.User) if !ok { @@ -756,6 +790,7 @@ func (e *Processor) ServiceMappingGet(c echo.Context, uid types.ServiceMappingUI return c.JSON(http.StatusOK, out) } +// ServiceMappingListGet API call processor func (e *Processor) ServiceMappingListGet(c echo.Context, params types.ServiceMappingListGetParams) error { user, ok := c.Get("user").(*types.User) if !ok { @@ -776,6 +811,7 @@ func (e *Processor) ServiceMappingListGet(c echo.Context, params types.ServiceMa return c.JSON(http.StatusOK, out) } +// ServiceMappingCreatePost API call processor func (e *Processor) ServiceMappingCreatePost(c echo.Context) error { var data types.ServiceMapping if 
err := c.Bind(&data); err != nil { @@ -813,6 +849,7 @@ func (e *Processor) ServiceMappingCreatePost(c echo.Context) error { return c.JSON(http.StatusOK, data) } +// ServiceMappingDelete API call processor func (e *Processor) ServiceMappingDelete(c echo.Context, uid types.ServiceMappingUID) error { // Only admin can delete ServiceMapping user, ok := c.Get("user").(*types.User) diff --git a/lib/openapi/meta/meta_v1.go b/lib/openapi/meta/meta_v1.go index 3bd96f8..40e854b 100644 --- a/lib/openapi/meta/meta_v1.go +++ b/lib/openapi/meta/meta_v1.go @@ -10,6 +10,7 @@ * governing permissions and limitations under the License. */ +// Package meta provides META-API for the resources package meta import ( @@ -28,12 +29,14 @@ import ( // H is a shortcut for map[string]any type H map[string]any +// Processor doing processing of the META-API request type Processor struct { fish *fish.Fish } -func NewV1Router(e *echo.Echo, fish *fish.Fish) { - proc := &Processor{fish: fish} +// NewV1Router creates router for META-APIv1 +func NewV1Router(e *echo.Echo, f *fish.Fish) { + proc := &Processor{fish: f} router := e.Group("") router.Use( // Only the local interface which we own can request @@ -42,6 +45,7 @@ func NewV1Router(e *echo.Echo, fish *fish.Fish) { RegisterHandlers(router, proc) } +// AddressAuth middleware to ensure META-API will not be used by crocodile func (e *Processor) AddressAuth(next echo.HandlerFunc) echo.HandlerFunc { return func(c echo.Context) error { // Only the existing local resource access it's metadata @@ -56,7 +60,8 @@ func (e *Processor) AddressAuth(next echo.HandlerFunc) echo.HandlerFunc { } } -func (e *Processor) Return(c echo.Context, code int, obj map[string]any) error { +// Return middleware which processes the return data and represents it as requestor want to see it +func (*Processor) Return(c echo.Context, code int, obj map[string]any) error { format := c.QueryParam("format") if len(format) == 0 { format = "json" @@ -78,7 +83,8 @@ func (e *Processor) 
Return(c echo.Context, code int, obj map[string]any) error { return nil } -func (e *Processor) DataGetList(c echo.Context, params types.DataGetListParams) error { +// DataGetList returns metadata assigned to the Resource +func (e *Processor) DataGetList(c echo.Context, _ /*params*/ types.DataGetListParams) error { var metadata map[string]any resInt := c.Get("resource") @@ -97,7 +103,8 @@ func (e *Processor) DataGetList(c echo.Context, params types.DataGetListParams) return e.Return(c, http.StatusOK, metadata) } -func (e *Processor) DataGet(c echo.Context, keyPath string, params types.DataGetParams) error { +// DataGet should return specific key value from the Resource metadata +func (e *Processor) DataGet(c echo.Context, _ /*keyPath*/ string, _ /*params*/ types.DataGetParams) error { // TODO: implement it e.Return(c, http.StatusNotFound, H{"message": "TODO: Not implemented"}) return fmt.Errorf("TODO: Not implemented") diff --git a/lib/openapi/openapi.go b/lib/openapi/openapi.go index 0d604a6..fb229af 100644 --- a/lib/openapi/openapi.go +++ b/lib/openapi/openapi.go @@ -15,6 +15,7 @@ //go:generate oapi-codegen -config api_v1.cfg.yaml ../../docs/openapi.yaml //go:generate oapi-codegen -config spec.cfg.yaml ../../docs/openapi.yaml +// Package openapi provides generated from OpenAPI spec API framework package openapi import ( @@ -30,7 +31,7 @@ import ( "github.com/labstack/echo/v4" echomw "github.com/labstack/echo/v4/middleware" - _ "github.com/oapi-codegen/oapi-codegen/v2/pkg/util" + _ "github.com/oapi-codegen/oapi-codegen/v2/pkg/util" // We need util here otherwise it will not load the needed imports and fail go.mod vetting "gopkg.in/yaml.v3" "github.com/adobe/aquarium-fish/lib/fish" @@ -39,9 +40,11 @@ import ( "github.com/adobe/aquarium-fish/lib/openapi/meta" ) +// YamlBinder is used to decode yaml requests type YamlBinder struct{} -func (cb *YamlBinder) Bind(i any, c echo.Context) (err error) { +// Bind allows to parse Yaml request data +func (*YamlBinder) Bind(i 
any, c echo.Context) (err error) { db := &echo.DefaultBinder{} if err = db.Bind(i, c); err != echo.ErrUnsupportedMediaType { return @@ -64,7 +67,8 @@ func (cb *YamlBinder) Bind(i any, c echo.Context) (err error) { return } -func Init(fish *fish.Fish, apiAddress, caPath, certPath, keyPath string) (*http.Server, error) { +// Init startups the API server to listen for incoming requests +func Init(f *fish.Fish, apiAddress, caPath, certPath, keyPath string) (*http.Server, error) { swagger, err := GetSwagger() if err != nil { return nil, fmt.Errorf("Fish OpenAPI: Error loading swagger spec: %w", err) @@ -85,8 +89,8 @@ func Init(fish *fish.Fish, apiAddress, caPath, certPath, keyPath string) (*http. // TODO: Probably it will be a feature an ability to separate those // routers to independence ports if needed - meta.NewV1Router(router, fish) - api.NewV1Router(router, fish) + meta.NewV1Router(router, f) + api.NewV1Router(router, f) // TODO: web UI router caPool := x509.NewCertPool() diff --git a/lib/openapi/types/authentication.go b/lib/openapi/types/authentication.go index eda40b0..3bb21da 100644 --- a/lib/openapi/types/authentication.go +++ b/lib/openapi/types/authentication.go @@ -10,6 +10,7 @@ * governing permissions and limitations under the License. 
*/ +// Package types stores generated types and their special functions package types import ( @@ -18,10 +19,12 @@ import ( "fmt" ) -func (auth Authentication) GormDataType() string { +// GormDataType describes how to store Authentication in database +func (Authentication) GormDataType() string { return "blob" } +// Scan converts the Authentication to json bytes func (auth *Authentication) Scan(value any) error { bytes, ok := value.([]byte) if !ok { @@ -32,6 +35,7 @@ func (auth *Authentication) Scan(value any) error { return err } +// Value converts json bytes to Authentication func (auth Authentication) Value() (driver.Value, error) { return json.Marshal(auth) } diff --git a/lib/openapi/types/label_definitions.go b/lib/openapi/types/label_definitions.go index 2d11fe0..f183af7 100644 --- a/lib/openapi/types/label_definitions.go +++ b/lib/openapi/types/label_definitions.go @@ -18,10 +18,12 @@ import ( "fmt" ) -func (ld LabelDefinitions) GormDataType() string { +// GormDataType describes how to store LabelDefinitions in database +func (LabelDefinitions) GormDataType() string { return "blob" } +// Scan converts the LabelDefinitions to json bytes func (ld *LabelDefinitions) Scan(value any) error { bytes, ok := value.([]byte) if !ok { @@ -38,6 +40,7 @@ func (ld *LabelDefinitions) Scan(value any) error { return err } +// Value converts json bytes to LabelDefinitions func (ld LabelDefinitions) Value() (driver.Value, error) { // Need to make sure the array node filter will not be nil for i, r := range ld { diff --git a/lib/openapi/types/node.go b/lib/openapi/types/node.go index b632e3b..f0df30b 100644 --- a/lib/openapi/types/node.go +++ b/lib/openapi/types/node.go @@ -20,11 +20,10 @@ import ( "os" ) +// NodePingDelay defines delay between the pings to keep the node active in the cluster const NodePingDelay = 10 -var ErrNodePingDuplication = fmt.Errorf("Fish Node: Unable to join the Aquarium cluster due to " + - "the node with the same name pinged the cluster less then 
2xNODE_PING_DELAY time ago") - +// Init prepares Node for usage func (n *Node) Init(nodeAddress, certPath string) error { // Set the node external address n.Address = nodeAddress diff --git a/lib/openapi/types/node_definition.go b/lib/openapi/types/node_definition.go index 2dd41c6..0583a37 100644 --- a/lib/openapi/types/node_definition.go +++ b/lib/openapi/types/node_definition.go @@ -24,10 +24,12 @@ import ( "github.com/shirou/gopsutil/v3/net" ) -func (nd NodeDefinition) GormDataType() string { +// GormDataType describes how to store NodeDefinition in database +func (NodeDefinition) GormDataType() string { return "blob" } +// Scan converts the NodeDefinition to json bytes func (nd *NodeDefinition) Scan(value any) error { bytes, ok := value.([]byte) if !ok { @@ -38,10 +40,12 @@ func (nd *NodeDefinition) Scan(value any) error { return err } +// Value converts json bytes to NodeDefinition func (nd NodeDefinition) Value() (driver.Value, error) { return json.Marshal(nd) } +// Update syncs the NodeDefinition to the current machine state func (nd *NodeDefinition) Update() { nd.Host, _ = host.Info() nd.Memory, _ = mem.VirtualMemory() diff --git a/lib/openapi/types/resources.go b/lib/openapi/types/resources.go index cce401a..b36c02c 100644 --- a/lib/openapi/types/resources.go +++ b/lib/openapi/types/resources.go @@ -21,10 +21,12 @@ import ( "github.com/adobe/aquarium-fish/lib/util" ) -func (r Resources) GormDataType() string { +// GormDataType describes how to store Resources in database +func (Resources) GormDataType() string { return "blob" } +// Scan converts the Resources to json bytes func (r *Resources) Scan(value any) error { bytes, ok := value.([]byte) if !ok { @@ -41,6 +43,7 @@ func (r *Resources) Scan(value any) error { return err } +// Value converts json bytes to Resources func (r Resources) Value() (driver.Value, error) { // Init the value, otherwise will return undesired nil if r.NodeFilter == nil { @@ -49,6 +52,7 @@ func (r Resources) Value() (driver.Value, 
error) { return json.Marshal(r) } +// Validate makes sure the Resources are defined correctly func (r *Resources) Validate(diskTypes []string, checkNet bool) error { // Check resources if r.Cpu < 1 { @@ -84,7 +88,7 @@ func (r *Resources) Validate(diskTypes []string, checkNet bool) error { return nil } -// Adds the Resources data to the existing data +// Add increases the Resources utilization by provided Resources func (r *Resources) Add(res Resources) error { if r.Cpu == 0 && r.Ram == 0 { // Set tenancy modificators for the first resource @@ -101,7 +105,7 @@ func (r *Resources) Add(res Resources) error { return nil } -// Subtracts the Resources data to the existing data +// Subtract decreases utilization of Resources by provided Resources func (r *Resources) Subtract(res Resources) (err error) { if r.Cpu < res.Cpu { err = fmt.Errorf("Resources: Unable to subtract more CPU than we have: %d < %d", r.Cpu, res.Cpu) @@ -124,7 +128,7 @@ func (r *Resources) Subtract(res Resources) (err error) { return } -// Checks if the Resources are filled with some values +// IsEmpty checks if the Resources are filled with some values func (r *Resources) IsEmpty() bool { if r.Cpu != 0 { return false diff --git a/lib/proxy_socks/proxy.go b/lib/proxysocks/proxy.go similarity index 73% rename from lib/proxy_socks/proxy.go rename to lib/proxysocks/proxy.go index f1bcd97..54ca95d 100644 --- a/lib/proxy_socks/proxy.go +++ b/lib/proxysocks/proxy.go @@ -10,7 +10,8 @@ * governing permissions and limitations under the License. 
*/ -package proxy_socks +// Package proxysocks implements socks5 proxy that could be used by the Resource VM to reach outside world +package proxysocks import ( "net" @@ -22,18 +23,22 @@ import ( "github.com/adobe/aquarium-fish/lib/log" ) +// ResolverSkip is needed to skip the resolving type ResolverSkip struct{} -func (d ResolverSkip) Resolve(ctx context.Context, name string) (context.Context, net.IP, error) { +// Resolve function makes skip possible +func (ResolverSkip) Resolve(ctx context.Context, _ /*name*/ string) (context.Context, net.IP, error) { // It's impossible to verify the access of the client // and determine the service mapping here so skipping this step return ctx, net.IP{}, nil } +// ProxyAccess configuration to store context while processing the proxy request type ProxyAccess struct { fish *fish.Fish } +// Allow will be executed to allow or deny proxy request func (p *ProxyAccess) Allow(ctx context.Context, req *socks5.Request) (context.Context, bool) { log.Debug("Proxy: Requested proxy from", req.RemoteAddr, "to", req.DestAddr) @@ -49,7 +54,7 @@ func (p *ProxyAccess) Allow(ctx context.Context, req *socks5.Request) (context.C if dest == "" { dest = req.DestAddr.IP.String() } - overDest := p.fish.ResourceServiceMapping(res, dest) + overDest := p.fish.ResourceServiceMappingByApplicationAndDest(res.ApplicationUID, dest) if overDest == "" { log.Warn("Proxy: Denied proxy from", req.RemoteAddr, "to", req.DestAddr) return ctx, false @@ -71,10 +76,11 @@ func (p *ProxyAccess) Allow(ctx context.Context, req *socks5.Request) (context.C return ctx, true } -func Init(fish *fish.Fish, address string) error { +// Init will start the socks5 proxy server +func Init(f *fish.Fish, address string) error { conf := &socks5.Config{ - Resolver: &ResolverSkip{}, // Skipping the resolver phase until access checked - Rules: &ProxyAccess{fish}, // Allow only known resources to access proxy + Resolver: &ResolverSkip{}, // Skipping the resolver phase until access checked + 
Rules: &ProxyAccess{f}, // Allow only known resources to access proxy } server, err := socks5.New(conf) diff --git a/lib/proxy_ssh/proxy.go b/lib/proxyssh/proxy.go similarity index 96% rename from lib/proxy_ssh/proxy.go rename to lib/proxyssh/proxy.go index cf822ae..2c3781c 100644 --- a/lib/proxy_ssh/proxy.go +++ b/lib/proxyssh/proxy.go @@ -33,7 +33,8 @@ * SOFTWARE. */ -package proxy_ssh +// Package proxyssh allows to access the remote resources through ssh proxy +package proxyssh import ( "crypto/rand" @@ -59,6 +60,7 @@ func init() { log.Info("The Fish SSH proxy is a re-implementation of Remco Verhoef's MIT licensed example (https://github.com/dutchcoders/sshproxy)") } +// ProxyAccess keeps state of the SSH server type ProxyAccess struct { fish *fish.Fish serverConfig *ssh.ServerConfig @@ -68,7 +70,7 @@ type ProxyAccess struct { sessions sync.Map } -// Stored in ProxyAccess::sessions. +// SessionRecord stored in ProxyAccess::sessions. type SessionRecord struct { ResourceAccessor *types.ResourceAccess RemoteAddr net.Addr @@ -201,8 +203,8 @@ func (p *ProxyAccess) serveConnection(conn net.Conn, serverConfig *ssh.ServerCon // These are kept for safety to ensure the channels are indeed closed, // but in theory the ProxyLoop will close the channels and that will // signal to io.Copy that we are complete. - defer localChannel.Close() - defer remoteChannel.Close() + defer localChannel.Close() //nolint:revive + defer remoteChannel.Close() //nolint:revive } log.Debugf("Connection between %q and %q closed.", conn.RemoteAddr(), remoteConn.RemoteAddr()) @@ -260,7 +262,8 @@ func (p *ProxyAccess) passwordCallback(conn ssh.ConnMetadata, pass []byte) (*ssh return nil, fmt.Errorf("invalid access") } -func Init(fish *fish.Fish, idRsaPath string, address string) error { +// Init starts SSH proxy +func Init(f *fish.Fish, idRsaPath string, address string) error { // First, try and read the file if it exists already. 
Otherwise, it is the // first execution, generate the private / public keys. The SSH server // requires at least one identity loaded to run. @@ -297,7 +300,7 @@ func Init(fish *fish.Fish, idRsaPath string, address string) error { return fmt.Errorf("proxy_ssh: failed to parse private key: %w", err) } - sshProxy := ProxyAccess{fish: fish} + sshProxy := ProxyAccess{fish: f} sshProxy.serverConfig = &ssh.ServerConfig{ PasswordCallback: sshProxy.passwordCallback, } diff --git a/lib/util/contains.go b/lib/util/contains.go index 6c7e45a..84ccdbd 100644 --- a/lib/util/contains.go +++ b/lib/util/contains.go @@ -10,8 +10,10 @@ * governing permissions and limitations under the License. */ +// Package util contains multiple utils for Fish package util +// Contains check the string slice contains string in it func Contains(list []string, value string) bool { for _, v := range list { if v == value { diff --git a/lib/util/dot_serialize.go b/lib/util/dot_serialize.go index 0512ab5..3e539a0 100644 --- a/lib/util/dot_serialize.go +++ b/lib/util/dot_serialize.go @@ -17,7 +17,7 @@ import ( "reflect" ) -// Simple serializer to get map as key.subkey=value with dot separation for the keys +// DotSerialize serializes data to get map as key.subkey=value with dot separation for the keys func DotSerialize(prefix string, in any) map[string]string { out := make(map[string]string) diff --git a/lib/util/duration.go b/lib/util/duration.go index 82d521c..6ba8ba3 100644 --- a/lib/util/duration.go +++ b/lib/util/duration.go @@ -18,14 +18,17 @@ import ( "time" ) +// Duration is a simple wrapper to add serialization functions type Duration time.Duration +// MarshalJSON represents Duration as JSON string func (d Duration) MarshalJSON() ([]byte, error) { return json.Marshal(time.Duration(d).String()) } +// UnmarshalJSON parses JSON string as Duration func (d *Duration) UnmarshalJSON(b []byte) error { - var v interface{} + var v any if err := json.Unmarshal(b, &v); err != nil { return err } diff --git 
a/lib/util/expression_sql_filter.go b/lib/util/expression_sql_filter.go index f522ddc..d1ab883 100644 --- a/lib/util/expression_sql_filter.go +++ b/lib/util/expression_sql_filter.go @@ -18,13 +18,13 @@ import ( "github.com/rqlite/sql" ) -// Ensures the where filter doesn't contain bad things (SQL injections) and returns a good one -// could be used as Where() in gorm. It expects just an expression, so no other SQL keys will work +// ExpressionSQLFilter ensures the where filter doesn't contain bad things (SQL injections) and returns a +// good one could be used as Where() in gorm. It expects just an expression, so no other SQL keys will work // here. For example: // * `id=1 AND a in (1,2) ORDER BY i; DROP u;` will become just `"id" = 1 AND "a" IN (1, 2)` // * `DROP users` - will fail // * `id = 1 OR lol in (SELECT * FROM users)` - will fail -func ExpressionSqlFilter(filter string) (string, error) { +func ExpressionSQLFilter(filter string) (string, error) { reader := strings.NewReader(filter) exp, err := sql.NewParser(reader).ParseExpr() if err != nil { diff --git a/lib/util/expression_sql_filter_test.go b/lib/util/expression_sql_filter_test.go index 57005e4..788e89f 100644 --- a/lib/util/expression_sql_filter_test.go +++ b/lib/util/expression_sql_filter_test.go @@ -18,23 +18,23 @@ import ( ) var ( - TestSqlExpressionInjections = map[string]string{ - ``: ``, - `1=1`: `1 = 1`, - `id = 3; DROP users`: `"id" = 3`, - `a IN (1,2) ORDER BY id; DROP users`: `"a" IN (1, 2)`, + testSQLExpressionInjections = [][2]string{ + {``, ``}, + {`1=1`, `1 = 1`}, + {`id = 3; DROP users`, `"id" = 3`}, + {`a IN (1,2) ORDER BY id; DROP users`, `"a" IN (1, 2)`}, // Fails - `SELECT * FROM users WHERE a = 1; DROP users`: ``, // Invalid expression - `a in (SELECT * FROM users)`: ``, // Subquery could be dangerous + {`SELECT * FROM users WHERE a = 1; DROP users`, ``}, // Invalid expression + {`a in (SELECT * FROM users)`, ``}, // Subquery could be dangerous } ) func 
Test_expression_sql_filter_where_injections(t *testing.T) { - for sql, result := range TestSqlExpressionInjections { - t.Run(fmt.Sprintf("Testing `%s`", sql), func(t *testing.T) { - out, err := ExpressionSqlFilter(sql) - if out != result { - t.Fatalf("ExpressionSQLFilter(`%s`) = `%s`, %v; want: `%s`", sql, out, err, result) + for _, sqlAndResult := range testSQLExpressionInjections { + t.Run(fmt.Sprintf("Testing `%s`", sqlAndResult[0]), func(t *testing.T) { + out, err := ExpressionSQLFilter(sqlAndResult[0]) + if out != sqlAndResult[1] { + t.Fatalf("ExpressionSQLFilter(`%s`) = `%s`, %v; want: `%s`", sqlAndResult[0], out, err, sqlAndResult[1]) } }) } diff --git a/lib/util/file_copy.go b/lib/util/file_copy.go index 5953c89..702e5b4 100644 --- a/lib/util/file_copy.go +++ b/lib/util/file_copy.go @@ -17,6 +17,7 @@ import ( "os" ) +// FileCopy copies from one place to another func FileCopy(src string, dst string) error { source, err := os.Open(src) if err != nil { diff --git a/lib/util/file_replace_block.go b/lib/util/file_replace_block.go index 6f9718d..2ca06cd 100644 --- a/lib/util/file_replace_block.go +++ b/lib/util/file_replace_block.go @@ -21,6 +21,7 @@ import ( "strings" ) +// FileReplaceBlock is a simple block replace in the file func FileReplaceBlock(path, blockFrom, blockTo string, lines ...string) error { // Open input file inF, err := os.OpenFile(path, os.O_RDONLY, 0o644) @@ -96,9 +97,7 @@ func FileReplaceBlock(path, blockFrom, blockTo string, lines ...string) error { } // Replace input file with out file - if err := os.Rename(outF.Name(), path); err != nil { - return err - } + err = os.Rename(outF.Name(), path) - return nil + return err } diff --git a/lib/util/file_replace_token.go b/lib/util/file_replace_token.go index 68a56a1..44933b2 100644 --- a/lib/util/file_replace_token.go +++ b/lib/util/file_replace_token.go @@ -21,6 +21,7 @@ import ( "strings" ) +// FileReplaceToken simple replaces tocken in the file func FileReplaceToken(path string, fullLine, add, 
anycase bool, tokenValues ...string) error { // Open input file inF, err := os.OpenFile(path, os.O_RDONLY, 0o644) @@ -74,21 +75,20 @@ func FileReplaceToken(path string, fullLine, add, anycase bool, tokenValues ...s if fullLine { line = value break // No need to check the other tokens - } else { - if anycase { - // We're not using RE because it's hard to predict the token - // and escape it to compile the proper regular expression - // so instead we using just regular replace by position of the token - idx := strings.Index(compLine, tokens[i]) - for idx != -1 { - // To support unicode use runes - line = string([]rune(line)[0:idx]) + value + string([]rune(line)[idx+len(tokens[i]):len(line)]) - compLine = strings.ToLower(line) - idx = strings.Index(compLine, tokens[i]) - } - } else { - line = strings.ReplaceAll(line, tokens[i], value) + } + if anycase { + // We're not using RE because it's hard to predict the token + // and escape it to compile the proper regular expression + // so instead we using just regular replace by position of the token + idx := strings.Index(compLine, tokens[i]) + for idx != -1 { + // To support unicode use runes + line = string([]rune(line)[0:idx]) + value + string([]rune(line)[idx+len(tokens[i]):len(line)]) + compLine = strings.ToLower(line) + idx = strings.Index(compLine, tokens[i]) } + } else { + line = strings.ReplaceAll(line, tokens[i], value) } } } @@ -123,9 +123,7 @@ func FileReplaceToken(path string, fullLine, add, anycase bool, tokenValues ...s } // Replace input file with out file - if err := os.Rename(outF.Name(), path); err != nil { - return err - } + err = os.Rename(outF.Name(), path) - return nil + return err } diff --git a/lib/util/file_starts_with.go b/lib/util/file_starts_with.go index 878af3a..170a5b2 100644 --- a/lib/util/file_starts_with.go +++ b/lib/util/file_starts_with.go @@ -19,11 +19,12 @@ import ( ) var ( - ErrFileStartsWithDirectory = fmt.Errorf("FileStartsWith: Unable to check file prefix for directory") - 
ErrFileStartsWithFileTooSmall = fmt.Errorf("FileStartsWith: File is too small for prefix") - ErrFileStartsWithNotEqual = fmt.Errorf("FileStartsWith: File is not starts with the prefix") + errFileStartsWithDirectory = fmt.Errorf("FileStartsWith: Unable to check file prefix for directory") + errFileStartsWithFileTooSmall = fmt.Errorf("FileStartsWith: File is too small for prefix") + errFileStartsWithNotEqual = fmt.Errorf("FileStartsWith: File is not starts with the prefix") ) +// FileStartsWith checks the file starts with required prefix func FileStartsWith(path string, prefix []byte) error { // Open input file inF, err := os.OpenFile(path, os.O_RDONLY, 0o644) @@ -34,7 +35,7 @@ func FileStartsWith(path string, prefix []byte) error { // Check it's not a dir if info, err := inF.Stat(); err == nil && info.IsDir() { - return ErrFileStartsWithDirectory + return errFileStartsWithDirectory } buf := make([]byte, len(prefix)) @@ -43,12 +44,12 @@ func FileStartsWith(path string, prefix []byte) error { return err } if length != len(prefix) { - return ErrFileStartsWithFileTooSmall + return errFileStartsWithFileTooSmall } if bytes.Equal(prefix, buf) { return nil } - return ErrFileStartsWithNotEqual + return errFileStartsWithNotEqual } diff --git a/lib/util/file_starts_with_test.go b/lib/util/file_starts_with_test.go index 3d6e4ec..ead576e 100644 --- a/lib/util/file_starts_with_test.go +++ b/lib/util/file_starts_with_test.go @@ -41,16 +41,16 @@ func TestFileStartsNotEqual(t *testing.T) { os.WriteFile(tmpFile, inData, 0o644) - if err := FileStartsWith(tmpFile, []byte("test2 ")); err != ErrFileStartsWithNotEqual { - t.Fatalf(`FileStartsWith("test2 ") = %v, want: %v`, err, ErrFileStartsWithNotEqual) + if err := FileStartsWith(tmpFile, []byte("test2 ")); err != errFileStartsWithNotEqual { + t.Fatalf(`FileStartsWith("test2 ") = %v, want: %v`, err, errFileStartsWithNotEqual) } } func TestFileStartsDirectory(t *testing.T) { tmpFile := t.TempDir() - if err := FileStartsWith(tmpFile, 
[]byte("test2 ")); err != ErrFileStartsWithDirectory { - t.Fatalf(`FileStartsWith("test2 ") = %v, want: %v`, err, ErrFileStartsWithDirectory) + if err := FileStartsWith(tmpFile, []byte("test2 ")); err != errFileStartsWithDirectory { + t.Fatalf(`FileStartsWith("test2 ") = %v, want: %v`, err, errFileStartsWithDirectory) } } @@ -61,7 +61,7 @@ func TestFileStartsSmall(t *testing.T) { os.WriteFile(tmpFile, inData, 0o644) - if err := FileStartsWith(tmpFile, []byte("biiiiiiiiiig prefix")); err != ErrFileStartsWithFileTooSmall { - t.Fatalf(`FileStartsWith("test2 ") = %v, want: %v`, err, ErrFileStartsWithFileTooSmall) + if err := FileStartsWith(tmpFile, []byte("biiiiiiiiiig prefix")); err != errFileStartsWithFileTooSmall { + t.Fatalf(`FileStartsWith("test2 ") = %v, want: %v`, err, errFileStartsWithFileTooSmall) } } diff --git a/lib/util/human_size.go b/lib/util/human_size.go index e75ce8d..91fb289 100644 --- a/lib/util/human_size.go +++ b/lib/util/human_size.go @@ -18,8 +18,10 @@ import ( "strings" ) +// HumanSize describes data size In Human Form type HumanSize uint64 +// Definitions of different byte sizes and some maximums const ( B HumanSize = 1 KB = B << 10 @@ -29,28 +31,29 @@ const ( PB = TB << 10 EB = PB << 10 - fnUnmarshalText string = "UnmarshalText" - maxUint64 uint64 = (1 << 64) - 1 - cutoff uint64 = maxUint64 / 10 + maxUint64 uint64 = (1 << 64) - 1 ) +// NewHumanSize creates human size for you func NewHumanSize(input string) (HumanSize, error) { var hs HumanSize err := hs.UnmarshalText([]byte(input)) return hs, err } +// MarshalText represents HumanSize as string func (hs HumanSize) MarshalText() ([]byte, error) { return []byte(hs.String()), nil } +// UnmarshalText converts text to HumanSize number // To be properly parsed the text should contain number and unit ("B", "KB", "MB"...) 
in the end func (hs *HumanSize) UnmarshalText(data []byte) error { input := strings.TrimSpace(string(data)) length := len(input) // Detecting unit & multiplier - var mult HumanSize = 0 + var mult HumanSize var unit string var unitLen int if length > 1 { @@ -109,10 +112,12 @@ func (hs *HumanSize) UnmarshalText(data []byte) error { return nil } +// Bytes returns amount of bytes stored in HumanSize func (hs HumanSize) Bytes() uint64 { return uint64(hs) } +// String represent HumanSize as human readable string func (hs HumanSize) String() string { switch { case hs == 0: diff --git a/lib/util/lock.go b/lib/util/lock.go index 56751c7..6525a18 100644 --- a/lib/util/lock.go +++ b/lib/util/lock.go @@ -25,7 +25,7 @@ import ( "github.com/adobe/aquarium-fish/lib/log" ) -// The function creates the lock file, notice - remove it yourself +// CreateLock creates the lock file, notice - remove it yourself func CreateLock(lockPath string) error { lockFile, err := os.Create(lockPath) if err != nil { @@ -40,7 +40,7 @@ func CreateLock(lockPath string) error { return nil } -// Wait for the lock file and clean func will be executed if it's invalid +// WaitLock waits for the lock file and clean func will be executed if it's invalid func WaitLock(lockPath string, clean func()) error { waitCounter := 0 for { @@ -72,7 +72,7 @@ func WaitLock(lockPath string, clean func()) error { } time.Sleep(5 * time.Second) - waitCounter += 1 + waitCounter++ } return nil diff --git a/lib/util/metadata_processing.go b/lib/util/metadata_processing.go index 5048ce6..ce8651a 100644 --- a/lib/util/metadata_processing.go +++ b/lib/util/metadata_processing.go @@ -8,7 +8,7 @@ import ( "github.com/alessio/shellescape" ) -// Serializes dictionary to usable format +// SerializeMetadata serializes dictionary to usable format func SerializeMetadata(format, prefix string, data map[string]any) (out []byte, err error) { switch format { case "json": // Default json diff --git a/lib/util/passthrough_monitor.go 
b/lib/util/passthrough_monitor.go index d5b1164..75c6859 100644 --- a/lib/util/passthrough_monitor.go +++ b/lib/util/passthrough_monitor.go @@ -19,7 +19,7 @@ import ( "github.com/adobe/aquarium-fish/lib/log" ) -// Wraps an existing io.Reader to monitor the stream +// PassThruMonitor wraps an existing io.Reader to monitor the stream // // It simply forwards the Read() call, while displaying // the results from individual calls to it. diff --git a/lib/util/streamlog_monitor.go b/lib/util/streamlog_monitor.go index e4b7345..4a52a3c 100644 --- a/lib/util/streamlog_monitor.go +++ b/lib/util/streamlog_monitor.go @@ -18,21 +18,21 @@ import ( "github.com/adobe/aquarium-fish/lib/log" ) -var LineBreak = []byte("\n") -var EmptyByte = []byte{} +var lineBreak = []byte("\n") +var emptyByte = []byte{} -// Wraps an existing io.Reader to monitor the log stream and adds prefix before each line +// StreamLogMonitor wraps an existing io.Reader to monitor the log stream and adds prefix before each line type StreamLogMonitor struct { Prefix string // Prefix for the line linebuf [][]byte // Where line will live until EOL or close } -// Read 'overrides' the underlying io.Reader's Read method +// Write monitors the log stream, prefixing and logging each complete line func (slm *StreamLogMonitor) Write(p []byte) (int, error) { index := 0 prevIndex := 0 for index < len(p) { - index += bytes.Index(p[prevIndex:], LineBreak) + index += bytes.Index(p[prevIndex:], lineBreak) if index == -1 { // The data does not contain EOL, so appending to buffer and wait slm.linebuf = append(slm.linebuf, p) @@ -41,7 +41,7 @@ func (slm *StreamLogMonitor) Write(p []byte) (int, error) { // The newline was found, so prepending the line buffer and print it out // We don't need the EOF in the line (log.Infof adds), so increment index after processing slm.linebuf = append(slm.linebuf, p[prevIndex:index]) - log.Info(slm.Prefix + 
string(bytes.Join(slm.linebuf, emptyByte))) clear(slm.linebuf) index++ prevIndex = index diff --git a/lib/util/unparsed_json.go b/lib/util/unparsed_json.go index d5cc273..9ed751c 100644 --- a/lib/util/unparsed_json.go +++ b/lib/util/unparsed_json.go @@ -18,20 +18,23 @@ import ( "gopkg.in/yaml.v3" ) -type UnparsedJson string +// UnparsedJSON is used to store json as is and not parse it until the right time +type UnparsedJSON string -func (r UnparsedJson) MarshalJSON() ([]byte, error) { +// MarshalJSON represents UnparsedJson as bytes +func (r UnparsedJSON) MarshalJSON() ([]byte, error) { return []byte(r), nil } -func (r *UnparsedJson) UnmarshalJSON(b []byte) error { +// UnmarshalJSON converts bytes to UnparsedJson +func (r *UnparsedJSON) UnmarshalJSON(b []byte) error { // Store json as string - *r = UnparsedJson(b) + *r = UnparsedJSON(b) return nil } -// To properly convert incoming yaml requests into json -func (r *UnparsedJson) UnmarshalYAML(node *yaml.Node) error { +// UnmarshalYAML is needed to properly convert incoming yaml requests into json +func (r *UnparsedJSON) UnmarshalYAML(node *yaml.Node) error { var value any if err := node.Decode(&value); err != nil { return err diff --git a/tests/allocate_apps_stress_test.go b/tests/allocate_apps_stress_test.go index 344433b..66a2226 100644 --- a/tests/allocate_apps_stress_test.go +++ b/tests/allocate_apps_stress_test.go @@ -66,7 +66,7 @@ drivers: t.Run("Create Label", func(t *testing.T) { apitest.New(). EnableNetworking(cli). - Post(afi.ApiAddress("api/v1/label/")). + Post(afi.APIAddress("api/v1/label/")). JSON(`{"name":"test-label", "version":1, "definitions": [ {"driver":"test", "resources":{"cpu":1,"ram":2}} ]}`). @@ -105,7 +105,7 @@ func allocateAppsStressWorker(t *testing.T, wg *sync.WaitGroup, id int, afi *h.A t.Run(fmt.Sprintf("%04d Create Application", id), func(t *testing.T) { apitest.New(). EnableNetworking(cli). - Post(afi.ApiAddress("api/v1/application/")). + Post(afi.APIAddress("api/v1/application/")). 
JSON(`{"label_UID":"`+label+`"}`). BasicAuth("admin", afi.AdminToken()). Expect(t). diff --git a/tests/allocate_multidefinition_label_test.go b/tests/allocate_multidefinition_label_test.go index 8915fb1..71d5326 100644 --- a/tests/allocate_multidefinition_label_test.go +++ b/tests/allocate_multidefinition_label_test.go @@ -63,7 +63,7 @@ drivers: t.Run("Create Label", func(t *testing.T) { apitest.New(). EnableNetworking(cli). - Post(afi.ApiAddress("api/v1/label/")). + Post(afi.APIAddress("api/v1/label/")). JSON(`{"name":"test-label", "version":1, "definitions": [ {"driver":"test", "resources":{"cpu":4,"ram":8}}, {"driver":"test", "resources":{"cpu":2,"ram":4}}, @@ -84,7 +84,7 @@ drivers: t.Run("Create Application", func(t *testing.T) { apitest.New(). EnableNetworking(cli). - Post(afi.ApiAddress("api/v1/application/")). + Post(afi.APIAddress("api/v1/application/")). JSON(`{"label_UID":"`+label.UID.String()+`"}`). BasicAuth("admin", afi.AdminToken()). Expect(t). @@ -102,7 +102,7 @@ drivers: h.Retry(&h.Timer{Timeout: 10 * time.Second, Wait: 1 * time.Second}, t, func(r *h.R) { apitest.New(). EnableNetworking(cli). - Get(afi.ApiAddress("api/v1/application/"+app.UID.String()+"/state")). + Get(afi.APIAddress("api/v1/application/"+app.UID.String()+"/state")). BasicAuth("admin", afi.AdminToken()). Expect(r). Status(http.StatusOK). @@ -119,7 +119,7 @@ drivers: t.Run("Resource should be created with 1 in definition index", func(t *testing.T) { apitest.New(). EnableNetworking(cli). - Get(afi.ApiAddress("api/v1/application/"+app.UID.String()+"/resource")). + Get(afi.APIAddress("api/v1/application/"+app.UID.String()+"/resource")). BasicAuth("admin", afi.AdminToken()). Expect(t). Status(http.StatusOK). @@ -137,7 +137,7 @@ drivers: t.Run("Deallocate the Application", func(t *testing.T) { apitest.New(). EnableNetworking(cli). - Get(afi.ApiAddress("api/v1/application/"+app.UID.String()+"/deallocate")). + Get(afi.APIAddress("api/v1/application/"+app.UID.String()+"/deallocate")). 
BasicAuth("admin", afi.AdminToken()). Expect(t). Status(http.StatusOK). @@ -148,7 +148,7 @@ drivers: h.Retry(&h.Timer{Timeout: 5 * time.Second, Wait: 1 * time.Second}, t, func(r *h.R) { apitest.New(). EnableNetworking(cli). - Get(afi.ApiAddress("api/v1/application/"+app.UID.String()+"/state")). + Get(afi.APIAddress("api/v1/application/"+app.UID.String()+"/state")). BasicAuth("admin", afi.AdminToken()). Expect(r). Status(http.StatusOK). @@ -165,7 +165,7 @@ drivers: t.Run("Create Label", func(t *testing.T) { apitest.New(). EnableNetworking(cli). - Post(afi.ApiAddress("api/v1/label/")). + Post(afi.APIAddress("api/v1/label/")). JSON(`{"name":"test-label", "version":2, "definitions": [ {"driver":"test", "resources":{"cpu":8,"ram":16}}, {"driver":"test", "resources":{"cpu":4,"ram":8}}, @@ -185,7 +185,7 @@ drivers: t.Run("Create Application", func(t *testing.T) { apitest.New(). EnableNetworking(cli). - Post(afi.ApiAddress("api/v1/application/")). + Post(afi.APIAddress("api/v1/application/")). JSON(`{"label_UID":"`+label.UID.String()+`"}`). BasicAuth("admin", afi.AdminToken()). Expect(t). @@ -202,7 +202,7 @@ drivers: h.Retry(&h.Timer{Timeout: 10 * time.Second, Wait: 1 * time.Second}, t, func(r *h.R) { apitest.New(). EnableNetworking(cli). - Get(afi.ApiAddress("api/v1/application/"+app.UID.String()+"/state")). + Get(afi.APIAddress("api/v1/application/"+app.UID.String()+"/state")). BasicAuth("admin", afi.AdminToken()). Expect(r). Status(http.StatusOK). @@ -218,7 +218,7 @@ drivers: t.Run("Resource should be created with 1 in definition index", func(t *testing.T) { apitest.New(). EnableNetworking(cli). - Get(afi.ApiAddress("api/v1/application/"+app.UID.String()+"/resource")). + Get(afi.APIAddress("api/v1/application/"+app.UID.String()+"/resource")). BasicAuth("admin", afi.AdminToken()). Expect(t). Status(http.StatusOK). @@ -236,7 +236,7 @@ drivers: t.Run("Deallocate the Application", func(t *testing.T) { apitest.New(). EnableNetworking(cli). 
- Get(afi.ApiAddress("api/v1/application/"+app.UID.String()+"/deallocate")). + Get(afi.APIAddress("api/v1/application/"+app.UID.String()+"/deallocate")). BasicAuth("admin", afi.AdminToken()). Expect(t). Status(http.StatusOK). @@ -247,7 +247,7 @@ drivers: h.Retry(&h.Timer{Timeout: 5 * time.Second, Wait: 1 * time.Second}, t, func(r *h.R) { apitest.New(). EnableNetworking(cli). - Get(afi.ApiAddress("api/v1/application/"+app.UID.String()+"/state")). + Get(afi.APIAddress("api/v1/application/"+app.UID.String()+"/state")). BasicAuth("admin", afi.AdminToken()). Expect(r). Status(http.StatusOK). @@ -264,7 +264,7 @@ drivers: t.Run("Create Label", func(t *testing.T) { apitest.New(). EnableNetworking(cli). - Post(afi.ApiAddress("api/v1/label/")). + Post(afi.APIAddress("api/v1/label/")). JSON(`{"name":"test-label", "version":3, "definitions": [ {"driver":"test", "resources":{"cpu":8,"ram":16}}, {"driver":"test", "resources":{"cpu":6,"ram":6}}, @@ -285,7 +285,7 @@ drivers: t.Run("Create Application", func(t *testing.T) { apitest.New(). EnableNetworking(cli). - Post(afi.ApiAddress("api/v1/application/")). + Post(afi.APIAddress("api/v1/application/")). JSON(`{"label_UID":"`+label.UID.String()+`"}`). BasicAuth("admin", afi.AdminToken()). Expect(t). @@ -302,7 +302,7 @@ drivers: h.Retry(&h.Timer{Timeout: 10 * time.Second, Wait: 1 * time.Second}, t, func(r *h.R) { apitest.New(). EnableNetworking(cli). - Get(afi.ApiAddress("api/v1/application/"+app.UID.String()+"/state")). + Get(afi.APIAddress("api/v1/application/"+app.UID.String()+"/state")). BasicAuth("admin", afi.AdminToken()). Expect(r). Status(http.StatusOK). @@ -318,7 +318,7 @@ drivers: t.Run("Resource should be created with 1 in definition index", func(t *testing.T) { apitest.New(). EnableNetworking(cli). - Get(afi.ApiAddress("api/v1/application/"+app.UID.String()+"/resource")). + Get(afi.APIAddress("api/v1/application/"+app.UID.String()+"/resource")). BasicAuth("admin", afi.AdminToken()). Expect(t). Status(http.StatusOK). 
@@ -336,7 +336,7 @@ drivers: t.Run("Deallocate the Application", func(t *testing.T) { apitest.New(). EnableNetworking(cli). - Get(afi.ApiAddress("api/v1/application/"+app.UID.String()+"/deallocate")). + Get(afi.APIAddress("api/v1/application/"+app.UID.String()+"/deallocate")). BasicAuth("admin", afi.AdminToken()). Expect(t). Status(http.StatusOK). @@ -347,7 +347,7 @@ drivers: h.Retry(&h.Timer{Timeout: 5 * time.Second, Wait: 1 * time.Second}, t, func(r *h.R) { apitest.New(). EnableNetworking(cli). - Get(afi.ApiAddress("api/v1/application/"+app.UID.String()+"/state")). + Get(afi.APIAddress("api/v1/application/"+app.UID.String()+"/state")). BasicAuth("admin", afi.AdminToken()). Expect(r). Status(http.StatusOK). diff --git a/tests/application_task_notexisting_fail_test.go b/tests/application_task_notexisting_fail_test.go index 8c67226..7589eeb 100644 --- a/tests/application_task_notexisting_fail_test.go +++ b/tests/application_task_notexisting_fail_test.go @@ -59,7 +59,7 @@ drivers: t.Run("Create Label", func(t *testing.T) { apitest.New(). EnableNetworking(cli). - Post(afi.ApiAddress("api/v1/label/")). + Post(afi.APIAddress("api/v1/label/")). JSON(`{"name":"test-label", "version":1, "definitions": [{"driver":"test", "resources":{"cpu":1,"ram":2}}]}`). BasicAuth("admin", afi.AdminToken()). Expect(t). @@ -76,7 +76,7 @@ drivers: t.Run("Create Application", func(t *testing.T) { apitest.New(). EnableNetworking(cli). - Post(afi.ApiAddress("api/v1/application/")). + Post(afi.APIAddress("api/v1/application/")). JSON(`{"label_UID":"`+label.UID.String()+`"}`). BasicAuth("admin", afi.AdminToken()). Expect(t). @@ -94,7 +94,7 @@ drivers: h.Retry(&h.Timer{Timeout: 10 * time.Second, Wait: 1 * time.Second}, t, func(r *h.R) { apitest.New(). EnableNetworking(cli). - Get(afi.ApiAddress("api/v1/application/"+app.UID.String()+"/state")). + Get(afi.APIAddress("api/v1/application/"+app.UID.String()+"/state")). BasicAuth("admin", afi.AdminToken()). Expect(r). Status(http.StatusOK). 
@@ -111,7 +111,7 @@ drivers: t.Run("Create ApplicationTask Snapshot", func(t *testing.T) { apitest.New(). EnableNetworking(cli). - Post(afi.ApiAddress("api/v1/application/"+app.UID.String()+"/task/")). + Post(afi.APIAddress("api/v1/application/"+app.UID.String()+"/task/")). JSON(map[string]any{"task": "NOTEXISTING_TASK", "when": types.ApplicationStatusALLOCATED}). BasicAuth("admin", afi.AdminToken()). Expect(t). @@ -129,7 +129,7 @@ drivers: h.Retry(&h.Timer{Timeout: 10 * time.Second, Wait: 1 * time.Second}, t, func(r *h.R) { apitest.New(). EnableNetworking(cli). - Get(afi.ApiAddress("api/v1/application/"+app.UID.String()+"/task/")). + Get(afi.APIAddress("api/v1/application/"+app.UID.String()+"/task/")). BasicAuth("admin", afi.AdminToken()). Expect(r). Status(http.StatusOK). @@ -151,7 +151,7 @@ drivers: t.Run("Deallocate the Application", func(t *testing.T) { apitest.New(). EnableNetworking(cli). - Get(afi.ApiAddress("api/v1/application/"+app.UID.String()+"/deallocate")). + Get(afi.APIAddress("api/v1/application/"+app.UID.String()+"/deallocate")). BasicAuth("admin", afi.AdminToken()). Expect(t). Status(http.StatusOK). @@ -162,7 +162,7 @@ drivers: h.Retry(&h.Timer{Timeout: 10 * time.Second, Wait: 1 * time.Second}, t, func(r *h.R) { apitest.New(). EnableNetworking(cli). - Get(afi.ApiAddress("api/v1/application/"+app.UID.String()+"/state")). + Get(afi.APIAddress("api/v1/application/"+app.UID.String()+"/state")). BasicAuth("admin", afi.AdminToken()). Expect(r). Status(http.StatusOK). diff --git a/tests/application_task_snapshot_by_user_test.go b/tests/application_task_snapshot_by_user_test.go index d292629..696f400 100644 --- a/tests/application_task_snapshot_by_user_test.go +++ b/tests/application_task_snapshot_by_user_test.go @@ -59,7 +59,7 @@ drivers: t.Run("Create Label", func(t *testing.T) { apitest.New(). EnableNetworking(cli). - Post(afi.ApiAddress("api/v1/label/")). + Post(afi.APIAddress("api/v1/label/")). 
JSON(`{"name":"test-label", "version":1, "definitions": [{"driver":"test", "resources":{"cpu":1,"ram":2}}]}`). BasicAuth("admin", afi.AdminToken()). Expect(t). @@ -75,7 +75,7 @@ drivers: t.Run("Create User", func(t *testing.T) { apitest.New(). EnableNetworking(cli). - Post(afi.ApiAddress("api/v1/user/")). + Post(afi.APIAddress("api/v1/user/")). JSON(`{"name":"test-user", "password":"test-user-password"}`). BasicAuth("admin", afi.AdminToken()). Expect(t). @@ -92,7 +92,7 @@ drivers: t.Run("Create Application", func(t *testing.T) { apitest.New(). EnableNetworking(cli). - Post(afi.ApiAddress("api/v1/application/")). + Post(afi.APIAddress("api/v1/application/")). JSON(`{"label_UID":"`+label.UID.String()+`"}`). BasicAuth("test-user", "test-user-password"). Expect(t). @@ -110,7 +110,7 @@ drivers: h.Retry(&h.Timer{Timeout: 10 * time.Second, Wait: 1 * time.Second}, t, func(r *h.R) { apitest.New(). EnableNetworking(cli). - Get(afi.ApiAddress("api/v1/application/"+app.UID.String()+"/state")). + Get(afi.APIAddress("api/v1/application/"+app.UID.String()+"/state")). BasicAuth("test-user", "test-user-password"). Expect(r). Status(http.StatusOK). @@ -127,7 +127,7 @@ drivers: t.Run("Resource should be created", func(t *testing.T) { apitest.New(). EnableNetworking(cli). - Get(afi.ApiAddress("api/v1/application/"+app.UID.String()+"/resource")). + Get(afi.APIAddress("api/v1/application/"+app.UID.String()+"/resource")). BasicAuth("test-user", "test-user-password"). Expect(t). Status(http.StatusOK). @@ -143,7 +143,7 @@ drivers: t.Run("Create ApplicationTask 1 Snapshot on ALLOCATE", func(t *testing.T) { apitest.New(). EnableNetworking(cli). - Post(afi.ApiAddress("api/v1/application/"+app.UID.String()+"/task/")). + Post(afi.APIAddress("api/v1/application/"+app.UID.String()+"/task/")). JSON(map[string]any{"task": "snapshot", "when": types.ApplicationStatusALLOCATED}). BasicAuth("test-user", "test-user-password"). Expect(t). 
@@ -160,7 +160,7 @@ drivers: t.Run("Create ApplicationTask 2 Snapshot on DEALLOCATE", func(t *testing.T) { apitest.New(). EnableNetworking(cli). - Post(afi.ApiAddress("api/v1/application/"+app.UID.String()+"/task/")). + Post(afi.APIAddress("api/v1/application/"+app.UID.String()+"/task/")). JSON(map[string]any{"task": "snapshot", "when": types.ApplicationStatusDEALLOCATE}). BasicAuth("test-user", "test-user-password"). Expect(t). @@ -178,7 +178,7 @@ drivers: h.Retry(&h.Timer{Timeout: 10 * time.Second, Wait: 1 * time.Second}, t, func(r *h.R) { apitest.New(). EnableNetworking(cli). - Get(afi.ApiAddress("api/v1/application/"+app.UID.String()+"/task/")). + Get(afi.APIAddress("api/v1/application/"+app.UID.String()+"/task/")). BasicAuth("test-user", "test-user-password"). Expect(r). Status(http.StatusOK). @@ -206,7 +206,7 @@ drivers: t.Run("Deallocate the Application", func(t *testing.T) { apitest.New(). EnableNetworking(cli). - Get(afi.ApiAddress("api/v1/application/"+app.UID.String()+"/deallocate")). + Get(afi.APIAddress("api/v1/application/"+app.UID.String()+"/deallocate")). BasicAuth("test-user", "test-user-password"). Expect(t). Status(http.StatusOK). @@ -217,7 +217,7 @@ drivers: h.Retry(&h.Timer{Timeout: 10 * time.Second, Wait: 1 * time.Second}, t, func(r *h.R) { apitest.New(). EnableNetworking(cli). - Get(afi.ApiAddress("api/v1/application/"+app.UID.String()+"/task/")). + Get(afi.APIAddress("api/v1/application/"+app.UID.String()+"/task/")). BasicAuth("test-user", "test-user-password"). Expect(r). Status(http.StatusOK). @@ -240,7 +240,7 @@ drivers: h.Retry(&h.Timer{Timeout: 10 * time.Second, Wait: 1 * time.Second}, t, func(r *h.R) { apitest.New(). EnableNetworking(cli). - Get(afi.ApiAddress("api/v1/application/"+app.UID.String()+"/state")). + Get(afi.APIAddress("api/v1/application/"+app.UID.String()+"/state")). BasicAuth("test-user", "test-user-password"). Expect(r). Status(http.StatusOK). 
diff --git a/tests/cant_allocate_too_big_label_test.go b/tests/cant_allocate_too_big_label_test.go index 8803f67..eb4fe4f 100644 --- a/tests/cant_allocate_too_big_label_test.go +++ b/tests/cant_allocate_too_big_label_test.go @@ -66,7 +66,7 @@ drivers: t.Run("Create Label", func(t *testing.T) { apitest.New(). EnableNetworking(cli). - Post(afi.ApiAddress("api/v1/label/")). + Post(afi.APIAddress("api/v1/label/")). JSON(`{"name":"test-label", "version":1, "definitions": [{"driver":"test", "resources":{"cpu":5,"ram":9}}]}`). BasicAuth("admin", afi.AdminToken()). Expect(t). @@ -83,7 +83,7 @@ drivers: t.Run("Create Application", func(t *testing.T) { apitest.New(). EnableNetworking(cli). - Post(afi.ApiAddress("api/v1/application/")). + Post(afi.APIAddress("api/v1/application/")). JSON(`{"label_UID":"`+label.UID.String()+`"}`). BasicAuth("admin", afi.AdminToken()). Expect(t). @@ -102,7 +102,7 @@ drivers: t.Run("Application should have state NEW in 10 sec", func(t *testing.T) { apitest.New(). EnableNetworking(cli). - Get(afi.ApiAddress("api/v1/application/"+app.UID.String()+"/state")). + Get(afi.APIAddress("api/v1/application/"+app.UID.String()+"/state")). BasicAuth("admin", afi.AdminToken()). Expect(t). Status(http.StatusOK). @@ -119,7 +119,7 @@ drivers: t.Run("Application should have state NEW in 20 sec", func(t *testing.T) { apitest.New(). EnableNetworking(cli). - Get(afi.ApiAddress("api/v1/application/"+app.UID.String()+"/state")). + Get(afi.APIAddress("api/v1/application/"+app.UID.String()+"/state")). BasicAuth("admin", afi.AdminToken()). Expect(t). Status(http.StatusOK). @@ -136,7 +136,7 @@ drivers: t.Run("Application should have state NEW in 30 sec", func(t *testing.T) { apitest.New(). EnableNetworking(cli). - Get(afi.ApiAddress("api/v1/application/"+app.UID.String()+"/state")). + Get(afi.APIAddress("api/v1/application/"+app.UID.String()+"/state")). BasicAuth("admin", afi.AdminToken()). Expect(t). Status(http.StatusOK). 
@@ -153,7 +153,7 @@ drivers: t.Run("Application should have state NEW in 40 sec", func(t *testing.T) { apitest.New(). EnableNetworking(cli). - Get(afi.ApiAddress("api/v1/application/"+app.UID.String()+"/state")). + Get(afi.APIAddress("api/v1/application/"+app.UID.String()+"/state")). BasicAuth("admin", afi.AdminToken()). Expect(t). Status(http.StatusOK). @@ -168,7 +168,7 @@ drivers: t.Run("Deallocate the Application", func(t *testing.T) { apitest.New(). EnableNetworking(cli). - Get(afi.ApiAddress("api/v1/application/"+app.UID.String()+"/deallocate")). + Get(afi.APIAddress("api/v1/application/"+app.UID.String()+"/deallocate")). BasicAuth("admin", afi.AdminToken()). Expect(t). Status(http.StatusOK). @@ -179,7 +179,7 @@ drivers: h.Retry(&h.Timer{Timeout: 5 * time.Second, Wait: 1 * time.Second}, t, func(r *h.R) { apitest.New(). EnableNetworking(cli). - Get(afi.ApiAddress("api/v1/application/"+app.UID.String()+"/state")). + Get(afi.APIAddress("api/v1/application/"+app.UID.String()+"/state")). BasicAuth("admin", afi.AdminToken()). Expect(r). Status(http.StatusOK). diff --git a/tests/default_lifetime_timeout_test.go b/tests/default_lifetime_timeout_test.go index 4e39404..7542b0a 100644 --- a/tests/default_lifetime_timeout_test.go +++ b/tests/default_lifetime_timeout_test.go @@ -60,7 +60,7 @@ drivers: t.Run("Create Label", func(t *testing.T) { apitest.New(). EnableNetworking(cli). - Post(afi.ApiAddress("api/v1/label/")). + Post(afi.APIAddress("api/v1/label/")). JSON(`{"name":"test-label", "version":1, "definitions": [ {"driver":"test","resources":{"cpu":1,"ram":2}} ]}`). @@ -79,7 +79,7 @@ drivers: t.Run("Create Application", func(t *testing.T) { apitest.New(). EnableNetworking(cli). - Post(afi.ApiAddress("api/v1/application/")). + Post(afi.APIAddress("api/v1/application/")). JSON(`{"label_UID":"`+label.UID.String()+`"}`). BasicAuth("admin", afi.AdminToken()). Expect(t). 
@@ -97,7 +97,7 @@ drivers: h.Retry(&h.Timer{Timeout: 10 * time.Second, Wait: 1 * time.Second}, t, func(r *h.R) { apitest.New(). EnableNetworking(cli). - Get(afi.ApiAddress("api/v1/application/"+app.UID.String()+"/state")). + Get(afi.APIAddress("api/v1/application/"+app.UID.String()+"/state")). BasicAuth("admin", afi.AdminToken()). Expect(r). Status(http.StatusOK). @@ -115,7 +115,7 @@ drivers: t.Run("Application should be still ALLOCATED in 10 sec", func(t *testing.T) { apitest.New(). EnableNetworking(cli). - Get(afi.ApiAddress("api/v1/application/"+app.UID.String()+"/state")). + Get(afi.APIAddress("api/v1/application/"+app.UID.String()+"/state")). BasicAuth("admin", afi.AdminToken()). Expect(t). Status(http.StatusOK). @@ -131,7 +131,7 @@ drivers: h.Retry(&h.Timer{Timeout: 5 * time.Second, Wait: 1 * time.Second}, t, func(r *h.R) { apitest.New(). EnableNetworking(cli). - Get(afi.ApiAddress("api/v1/application/"+app.UID.String()+"/state")). + Get(afi.APIAddress("api/v1/application/"+app.UID.String()+"/state")). BasicAuth("admin", afi.AdminToken()). Expect(r). Status(http.StatusOK). diff --git a/tests/generated_uids_prefix_is_node_prefix_test.go b/tests/generated_uids_prefix_is_node_prefix_test.go index 6a95c75..2387d1a 100644 --- a/tests/generated_uids_prefix_is_node_prefix_test.go +++ b/tests/generated_uids_prefix_is_node_prefix_test.go @@ -66,7 +66,7 @@ drivers: t.Run("Get node data", func(t *testing.T) { apitest.New(). EnableNetworking(cli). - Get(afi.ApiAddress("api/v1/node/")). + Get(afi.APIAddress("api/v1/node/")). BasicAuth("admin", afi.AdminToken()). Expect(t). Status(http.StatusOK). @@ -86,7 +86,7 @@ drivers: t.Run("Create Label", func(t *testing.T) { apitest.New(). EnableNetworking(cli). - Post(afi.ApiAddress("api/v1/label/")). + Post(afi.APIAddress("api/v1/label/")). JSON(`{"name":"test-label", "version":1, "definitions": [{"driver":"test", "resources":{"cpu":1,"ram":2}}]}`). BasicAuth("admin", afi.AdminToken()). Expect(t). 
@@ -107,7 +107,7 @@ drivers: t.Run("Create Application", func(t *testing.T) { apitest.New(). EnableNetworking(cli). - Post(afi.ApiAddress("api/v1/application/")). + Post(afi.APIAddress("api/v1/application/")). JSON(`{"label_UID":"`+label.UID.String()+`"}`). BasicAuth("admin", afi.AdminToken()). Expect(t). @@ -129,7 +129,7 @@ drivers: h.Retry(&h.Timer{Timeout: 10 * time.Second, Wait: 1 * time.Second}, t, func(r *h.R) { apitest.New(). EnableNetworking(cli). - Get(afi.ApiAddress("api/v1/application/"+app.UID.String()+"/state")). + Get(afi.APIAddress("api/v1/application/"+app.UID.String()+"/state")). BasicAuth("admin", afi.AdminToken()). Expect(r). Status(http.StatusOK). @@ -154,7 +154,7 @@ drivers: t.Run("Resource should be created", func(t *testing.T) { apitest.New(). EnableNetworking(cli). - Get(afi.ApiAddress("api/v1/application/"+app.UID.String()+"/resource")). + Get(afi.APIAddress("api/v1/application/"+app.UID.String()+"/resource")). BasicAuth("admin", afi.AdminToken()). Expect(t). Status(http.StatusOK). @@ -177,7 +177,7 @@ drivers: t.Run("Deallocate the Application", func(t *testing.T) { apitest.New(). EnableNetworking(cli). - Get(afi.ApiAddress("api/v1/application/"+app.UID.String()+"/deallocate")). + Get(afi.APIAddress("api/v1/application/"+app.UID.String()+"/deallocate")). BasicAuth("admin", afi.AdminToken()). Expect(t). Status(http.StatusOK). @@ -188,7 +188,7 @@ drivers: h.Retry(&h.Timer{Timeout: 10 * time.Second, Wait: 1 * time.Second}, t, func(r *h.R) { apitest.New(). EnableNetworking(cli). - Get(afi.ApiAddress("api/v1/application/"+app.UID.String()+"/state")). + Get(afi.APIAddress("api/v1/application/"+app.UID.String()+"/state")). BasicAuth("admin", afi.AdminToken()). Expect(r). Status(http.StatusOK). diff --git a/tests/helper/copy.go b/tests/helper/copy.go index b8552d4..fc6a35b 100644 --- a/tests/helper/copy.go +++ b/tests/helper/copy.go @@ -10,6 +10,7 @@ * governing permissions and limitations under the License. 
*/ +// Package helper makes our integration tests easier package helper import ( @@ -18,7 +19,7 @@ import ( "path/filepath" ) -// Copy files around +// CopyFile will copy files around func CopyFile(src, dst string) error { fin, err := os.Open(src) if err != nil { diff --git a/tests/helper/fish.go b/tests/helper/fish.go index 3b2a35d..1f52df1 100644 --- a/tests/helper/fish.go +++ b/tests/helper/fish.go @@ -26,7 +26,7 @@ import ( var fishPath = os.Getenv("FISH_PATH") // Full path to the aquarium-fish binary -// Saves state of the running Aquarium Fish for particular test +// AFInstance saves state of the running Aquarium Fish for particular test type AFInstance struct { workspace string fishKill context.CancelFunc @@ -38,7 +38,7 @@ type AFInstance struct { adminToken string } -// Simple creates and run the fish node +// NewAquariumFish simply creates and runs the fish node func NewAquariumFish(tb testing.TB, name, cfg string, args ...string) *AFInstance { tb.Helper() afi := NewAfInstance(tb, name, cfg) @@ -47,7 +47,7 @@ func NewAquariumFish(tb testing.TB, name, cfg string, args ...string) *AFInstanc return afi } -// If you need to create instance without starting it up right away +// NewAfInstance is helpful if you need to create an instance without starting it up right away func NewAfInstance(tb testing.TB, name, cfg string) *AFInstance { tb.Helper() tb.Log("INFO: Creating new node:", name) @@ -65,55 +65,55 @@ func NewAfInstance(tb testing.TB, name, cfg string) *AFInstance { return afi } -// Start another node of cluster +// NewClusterNode starts another node of cluster // It will automatically add cluster_join parameter to the config -func (afi1 *AFInstance) NewClusterNode(tb testing.TB, name, cfg string, args ...string) *AFInstance { +func (afi *AFInstance) NewClusterNode(tb testing.TB, name, cfg string, args ...string) *AFInstance { tb.Helper() - afi2 := afi1.NewAfInstanceCluster(tb, name, cfg) + afi2 := afi.NewAfInstanceCluster(tb, name, cfg) afi2.Start(tb, args...) 
return afi2 } -// Just create the node based on the existing cluster node -func (afi1 *AFInstance) NewAfInstanceCluster(tb testing.TB, name, cfg string) *AFInstance { +// NewAfInstanceCluster just creates the node based on the existing cluster node +func (afi *AFInstance) NewAfInstanceCluster(tb testing.TB, name, cfg string) *AFInstance { tb.Helper() - tb.Log("INFO: Creating new cluster node with seed node:", afi1.nodeName) - cfg += fmt.Sprintf("\ncluster_join: [%q]", afi1.endpoint) + tb.Log("INFO: Creating new cluster node with seed node:", afi.nodeName) + cfg += fmt.Sprintf("\ncluster_join: [%q]", afi.endpoint) afi2 := NewAfInstance(tb, name, cfg) // Copy seed node CA to generate valid cluster node cert - if err := CopyFile(filepath.Join(afi1.workspace, "fish_data", "ca.key"), filepath.Join(afi2.workspace, "fish_data", "ca.key")); err != nil { + if err := CopyFile(filepath.Join(afi.workspace, "fish_data", "ca.key"), filepath.Join(afi2.workspace, "fish_data", "ca.key")); err != nil { tb.Fatalf("ERROR: Unable to copy CA key: %v", err) } - if err := CopyFile(filepath.Join(afi1.workspace, "fish_data", "ca.crt"), filepath.Join(afi2.workspace, "fish_data", "ca.crt")); err != nil { + if err := CopyFile(filepath.Join(afi.workspace, "fish_data", "ca.crt"), filepath.Join(afi2.workspace, "fish_data", "ca.crt")); err != nil { tb.Fatalf("ERROR: Unable to copy CA crt: %v", err) } return afi2 } -// Will return just IP:PORT +// Endpoint will return IP:PORT func (afi *AFInstance) Endpoint() string { return afi.endpoint } -// Will return url to access API of AquariumFish -func (afi *AFInstance) ApiAddress(path string) string { +// APIAddress will return url to access API of AquariumFish +func (afi *AFInstance) APIAddress(path string) string { return fmt.Sprintf("https://%s/%s", afi.endpoint, path) } -// Will return workspace of the AquariumFish +// Workspace will return workspace of the AquariumFish func (afi *AFInstance) Workspace() string { return afi.workspace } -// Returns 
admin token +// AdminToken returns admin token func (afi *AFInstance) AdminToken() string { return afi.adminToken } -// Check the fish instance is running +// IsRunning checks the fish instance is running func (afi *AFInstance) IsRunning() bool { return afi.running } @@ -134,7 +134,7 @@ func (afi *AFInstance) Cleanup(tb testing.TB) { os.RemoveAll(afi.workspace) } -// Stops the fish node executable +// Stop the fish node executable func (afi *AFInstance) Stop(tb testing.TB) { tb.Helper() if afi.cmd == nil || !afi.running { @@ -156,7 +156,7 @@ func (afi *AFInstance) Stop(tb testing.TB) { afi.fishKill() } -// Starts the fish node executable +// Start the fish node executable func (afi *AFInstance) Start(tb testing.TB, args ...string) { tb.Helper() if afi.running { diff --git a/tests/helper/retry.go b/tests/helper/retry.go index bcaa5ee..4700f68 100644 --- a/tests/helper/retry.go +++ b/tests/helper/retry.go @@ -38,35 +38,42 @@ type R struct { output []string } -func (r *R) Helper() {} +// Helper shows this struct as helper +func (*R) Helper() {} var runFailed = struct{}{} +// FailNow will fail the retry func (r *R) FailNow() { r.fail = true panic(runFailed) } +// Fatal fail and log func (r *R) Fatal(args ...any) { r.log(fmt.Sprint(args...)) r.FailNow() } +// Fatalf fail and log func (r *R) Fatalf(format string, args ...any) { r.log(fmt.Sprintf(format, args...)) r.FailNow() } +// Error log error func (r *R) Error(args ...any) { r.log(fmt.Sprint(args...)) r.fail = true } +// Errorf log error func (r *R) Errorf(format string, args ...any) { r.log(fmt.Sprintf(format, args...)) r.fail = true } +// Check checks if everything is ok func (r *R) Check(err error) { if err != nil { r.log(err.Error()) @@ -98,6 +105,7 @@ func decorate(s string) string { return fmt.Sprintf("%s:%d: %s", file, line, s) } +// Retry retries the given function until the Retryer stops it func Retry(r Retryer, t Failer, f func(r *R)) { t.Helper() run(r, t, f) @@ -169,17 +177,18 @@ type Counter struct { Count int Wait time.Duration - count int + intCount 
int } +// Continue counter func (r *Counter) Continue() bool { - if r.count == r.Count { + if r.intCount == r.Count { return false } - if r.count > 0 { + if r.intCount > 0 { time.Sleep(r.Wait) } - r.count++ + r.intCount++ return true } @@ -194,6 +203,7 @@ type Timer struct { stop time.Time } +// Continue the timer func (r *Timer) Continue() bool { if r.stop.IsZero() { r.stop = time.Now().Add(r.Timeout) diff --git a/tests/helper/t_mock.go b/tests/helper/t_mock.go index 18e1525..69c8dfe 100644 --- a/tests/helper/t_mock.go +++ b/tests/helper/t_mock.go @@ -18,7 +18,7 @@ import ( "testing" ) -// Useful to capture the failed test +// MockT is useful to capture the failed test type MockT struct { testing.T @@ -27,29 +27,35 @@ type MockT struct { t *testing.T } +// FailNow when it's the right time func (m *MockT) FailNow() { m.FailNowCalled = true runtime.Goexit() } +// Log message func (m *MockT) Log(args ...any) { m.t.Log(args...) } +// Logf message func (m *MockT) Logf(format string, args ...any) { m.t.Logf(format, args...) } +// Fatal message func (m *MockT) Fatal(args ...any) { m.t.Log(args...) m.FailNow() } +// Fatalf message func (m *MockT) Fatalf(format string, args ...any) { m.t.Logf(format, args...) m.FailNow() } +// ExpectFailure when failure expected func ExpectFailure(t *testing.T, f func(tt testing.TB)) { t.Helper() var wg sync.WaitGroup diff --git a/tests/json_label_create_test.go b/tests/json_label_create_test.go index b6a0a83..be0b200 100644 --- a/tests/json_label_create_test.go +++ b/tests/json_label_create_test.go @@ -61,7 +61,7 @@ drivers: t.Run("Create & check JSON Label", func(t *testing.T) { apitest.New(). EnableNetworking(cli). - Post(afi.ApiAddress("api/v1/label/")). + Post(afi.APIAddress("api/v1/label/")). Header("Content-Type", "application/json"). Body(`{"name":"test-label","version":1,"definitions":[{"driver":"test","options":{"fail_options_apply":0},"resources":{"cpu":1,"ram":2}}]}`). BasicAuth("admin", afi.AdminToken()). 
diff --git a/tests/label_find_filter_sql_injection_test.go b/tests/label_find_filter_sql_injection_test.go index 0232a01..a109cbc 100644 --- a/tests/label_find_filter_sql_injection_test.go +++ b/tests/label_find_filter_sql_injection_test.go @@ -62,7 +62,7 @@ drivers: t.Run("Create Label 1", func(t *testing.T) { apitest.New(). EnableNetworking(cli). - Post(afi.ApiAddress("api/v1/label/")). + Post(afi.APIAddress("api/v1/label/")). JSON(`{"name":"test-label1", "version":1, "definitions": [{"driver":"test", "resources":{"cpu":1,"ram":2}}]}`). BasicAuth("admin", afi.AdminToken()). Expect(t). @@ -79,7 +79,7 @@ drivers: t.Run("Create Label 2", func(t *testing.T) { apitest.New(). EnableNetworking(cli). - Post(afi.ApiAddress("api/v1/label/")). + Post(afi.APIAddress("api/v1/label/")). JSON(`{"name":"test-label2", "version":1, "definitions": [{"driver":"test", "resources":{"cpu":1,"ram":2}}]}`). BasicAuth("admin", afi.AdminToken()). Expect(t). @@ -96,7 +96,7 @@ drivers: t.Run("Find Label 1 with simple SQL injection", func(t *testing.T) { apitest.New(). EnableNetworking(cli). - Get(afi.ApiAddress("api/v1/label/")). + Get(afi.APIAddress("api/v1/label/")). Query("filter", `name = '`+label1.Name+`'; DROP label`). BasicAuth("admin", afi.AdminToken()). Expect(t). @@ -115,7 +115,7 @@ drivers: t.Run("Find no labels with subquery SQL injection", func(t *testing.T) { apitest.New(). EnableNetworking(cli). - Get(afi.ApiAddress("api/v1/label/")). + Get(afi.APIAddress("api/v1/label/")). Query("filter", `name IN (DROP label)`). BasicAuth("admin", afi.AdminToken()). Expect(t). @@ -131,7 +131,7 @@ drivers: t.Run("Find no labels with stupid SQL injection", func(t *testing.T) { apitest.New(). EnableNetworking(cli). - Get(afi.ApiAddress("api/v1/label/")). + Get(afi.APIAddress("api/v1/label/")). Query("filter", `DROP label`). BasicAuth("admin", afi.AdminToken()). Expect(t). @@ -147,7 +147,7 @@ drivers: t.Run("Find Label 1", func(t *testing.T) { apitest.New(). EnableNetworking(cli). 
- Get(afi.ApiAddress("api/v1/label/")). + Get(afi.APIAddress("api/v1/label/")). Query("filter", `name='`+label1.Name+`'`). BasicAuth("admin", afi.AdminToken()). Expect(t). @@ -166,7 +166,7 @@ drivers: t.Run("Find Label 2", func(t *testing.T) { apitest.New(). EnableNetworking(cli). - Get(afi.ApiAddress("api/v1/label/")). + Get(afi.APIAddress("api/v1/label/")). Query("filter", `name = '`+label2.Name+`'`). BasicAuth("admin", afi.AdminToken()). Expect(t). @@ -185,7 +185,7 @@ drivers: t.Run("Find all labels with LIKE", func(t *testing.T) { apitest.New(). EnableNetworking(cli). - Get(afi.ApiAddress("api/v1/label/")). + Get(afi.APIAddress("api/v1/label/")). Query("filter", `name LIKE 'test-label%'`). BasicAuth("admin", afi.AdminToken()). Expect(t). @@ -201,7 +201,7 @@ drivers: t.Run("Find all labels with IN", func(t *testing.T) { apitest.New(). EnableNetworking(cli). - Get(afi.ApiAddress("api/v1/label/")). + Get(afi.APIAddress("api/v1/label/")). Query("filter", `name IN ('`+label1.Name+`', '`+label2.Name+`')`). BasicAuth("admin", afi.AdminToken()). Expect(t). diff --git a/tests/label_lifetime_timeout_test.go b/tests/label_lifetime_timeout_test.go index fe1788d..413457e 100644 --- a/tests/label_lifetime_timeout_test.go +++ b/tests/label_lifetime_timeout_test.go @@ -59,7 +59,7 @@ drivers: t.Run("Create Label", func(t *testing.T) { apitest.New(). EnableNetworking(cli). - Post(afi.ApiAddress("api/v1/label/")). + Post(afi.APIAddress("api/v1/label/")). JSON(`{"name":"test-label", "version":1, "definitions": [ {"driver":"test","resources":{"cpu":1,"ram":2,"lifetime":"15s"}} ]}`). @@ -78,7 +78,7 @@ drivers: t.Run("Create Application", func(t *testing.T) { apitest.New(). EnableNetworking(cli). - Post(afi.ApiAddress("api/v1/application/")). + Post(afi.APIAddress("api/v1/application/")). JSON(`{"label_UID":"`+label.UID.String()+`"}`). BasicAuth("admin", afi.AdminToken()). Expect(t). 
@@ -96,7 +96,7 @@ drivers: h.Retry(&h.Timer{Timeout: 10 * time.Second, Wait: 1 * time.Second}, t, func(r *h.R) { apitest.New(). EnableNetworking(cli). - Get(afi.ApiAddress("api/v1/application/"+app.UID.String()+"/state")). + Get(afi.APIAddress("api/v1/application/"+app.UID.String()+"/state")). BasicAuth("admin", afi.AdminToken()). Expect(r). Status(http.StatusOK). @@ -114,7 +114,7 @@ drivers: t.Run("Application should be still ALLOCATED in 10 sec", func(t *testing.T) { apitest.New(). EnableNetworking(cli). - Get(afi.ApiAddress("api/v1/application/"+app.UID.String()+"/state")). + Get(afi.APIAddress("api/v1/application/"+app.UID.String()+"/state")). BasicAuth("admin", afi.AdminToken()). Expect(t). Status(http.StatusOK). @@ -130,7 +130,7 @@ drivers: h.Retry(&h.Timer{Timeout: 5 * time.Second, Wait: 1 * time.Second}, t, func(r *h.R) { apitest.New(). EnableNetworking(cli). - Get(afi.ApiAddress("api/v1/application/"+app.UID.String()+"/state")). + Get(afi.APIAddress("api/v1/application/"+app.UID.String()+"/state")). BasicAuth("admin", afi.AdminToken()). Expect(r). Status(http.StatusOK). diff --git a/tests/label_overrides_default_lifetime_timeout_test.go b/tests/label_overrides_default_lifetime_timeout_test.go index 538ad4d..62629ce 100644 --- a/tests/label_overrides_default_lifetime_timeout_test.go +++ b/tests/label_overrides_default_lifetime_timeout_test.go @@ -60,7 +60,7 @@ drivers: t.Run("Create Label", func(t *testing.T) { apitest.New(). EnableNetworking(cli). - Post(afi.ApiAddress("api/v1/label/")). + Post(afi.APIAddress("api/v1/label/")). JSON(`{"name":"test-label", "version":1, "definitions": [ {"driver":"test","resources":{"cpu":1,"ram":2,"lifetime":"15s"}} ]}`). @@ -79,7 +79,7 @@ drivers: t.Run("Create Application", func(t *testing.T) { apitest.New(). EnableNetworking(cli). - Post(afi.ApiAddress("api/v1/application/")). + Post(afi.APIAddress("api/v1/application/")). JSON(`{"label_UID":"`+label.UID.String()+`"}`). BasicAuth("admin", afi.AdminToken()). Expect(t). 
@@ -97,7 +97,7 @@ drivers: h.Retry(&h.Timer{Timeout: 10 * time.Second, Wait: 1 * time.Second}, t, func(r *h.R) { apitest.New(). EnableNetworking(cli). - Get(afi.ApiAddress("api/v1/application/"+app.UID.String()+"/state")). + Get(afi.APIAddress("api/v1/application/"+app.UID.String()+"/state")). BasicAuth("admin", afi.AdminToken()). Expect(r). Status(http.StatusOK). @@ -115,7 +115,7 @@ drivers: t.Run("Application should be still ALLOCATED in 10 sec", func(t *testing.T) { apitest.New(). EnableNetworking(cli). - Get(afi.ApiAddress("api/v1/application/"+app.UID.String()+"/state")). + Get(afi.APIAddress("api/v1/application/"+app.UID.String()+"/state")). BasicAuth("admin", afi.AdminToken()). Expect(t). Status(http.StatusOK). @@ -131,7 +131,7 @@ drivers: h.Retry(&h.Timer{Timeout: 5 * time.Second, Wait: 1 * time.Second}, t, func(r *h.R) { apitest.New(). EnableNetworking(cli). - Get(afi.ApiAddress("api/v1/application/"+app.UID.String()+"/state")). + Get(afi.APIAddress("api/v1/application/"+app.UID.String()+"/state")). BasicAuth("admin", afi.AdminToken()). Expect(r). Status(http.StatusOK). diff --git a/tests/maintenance_mode_test.go b/tests/maintenance_mode_test.go index 2ff4977..9cb22b5 100644 --- a/tests/maintenance_mode_test.go +++ b/tests/maintenance_mode_test.go @@ -63,7 +63,7 @@ drivers: t.Run("Send maintenance request", func(t *testing.T) { apitest.New(). EnableNetworking(cli). - Get(afi.ApiAddress("api/v1/node/this/maintenance")). + Get(afi.APIAddress("api/v1/node/this/maintenance")). BasicAuth("admin", afi.AdminToken()). Expect(t). Status(http.StatusOK). @@ -74,7 +74,7 @@ drivers: t.Run("Create Label", func(t *testing.T) { apitest.New(). EnableNetworking(cli). - Post(afi.ApiAddress("api/v1/label/")). + Post(afi.APIAddress("api/v1/label/")). JSON(`{"name":"test-label", "version":1, "definitions": [{"driver":"test", "resources":{"cpu":1,"ram":2}}]}`). BasicAuth("admin", afi.AdminToken()). Expect(t). 
@@ -91,7 +91,7 @@ drivers: t.Run("Create Application", func(t *testing.T) { apitest.New(). EnableNetworking(cli). - Post(afi.ApiAddress("api/v1/application/")). + Post(afi.APIAddress("api/v1/application/")). JSON(`{"label_UID":"`+label.UID.String()+`"}`). BasicAuth("admin", afi.AdminToken()). Expect(t). @@ -110,7 +110,7 @@ drivers: apitest.New(). EnableNetworking(cli). - Get(afi.ApiAddress("api/v1/application/"+app.UID.String()+"/state")). + Get(afi.APIAddress("api/v1/application/"+app.UID.String()+"/state")). BasicAuth("admin", afi.AdminToken()). Expect(t). Status(http.StatusOK). @@ -125,7 +125,7 @@ drivers: t.Run("Send maintenance cancel request", func(t *testing.T) { apitest.New(). EnableNetworking(cli). - Get(afi.ApiAddress("api/v1/node/this/maintenance")). + Get(afi.APIAddress("api/v1/node/this/maintenance")). Query("enable", "false"). BasicAuth("admin", afi.AdminToken()). Expect(t). @@ -137,7 +137,7 @@ drivers: h.Retry(&h.Timer{Timeout: 20 * time.Second, Wait: 1 * time.Second}, t, func(r *h.R) { apitest.New(). EnableNetworking(cli). - Get(afi.ApiAddress("api/v1/application/"+app.UID.String()+"/state")). + Get(afi.APIAddress("api/v1/application/"+app.UID.String()+"/state")). BasicAuth("admin", afi.AdminToken()). Expect(r). Status(http.StatusOK). diff --git a/tests/multiple_driver_instances_test.go b/tests/multiple_driver_instances_test.go index dfe15bc..0126e62 100644 --- a/tests/multiple_driver_instances_test.go +++ b/tests/multiple_driver_instances_test.go @@ -69,7 +69,7 @@ drivers: t.Run("Create bad Label with test/dev driver", func(t *testing.T) { apitest.New(). EnableNetworking(cli). - Post(afi.ApiAddress("api/v1/label/")). + Post(afi.APIAddress("api/v1/label/")). JSON(`{"name":"test-label", "version":1, "definitions": [{"driver":"test/dev", "resources":{"cpu":5,"ram":9}}]}`). BasicAuth("admin", afi.AdminToken()). Expect(t). @@ -86,7 +86,7 @@ drivers: t.Run("Create Application", func(t *testing.T) { apitest.New(). EnableNetworking(cli). 
- Post(afi.ApiAddress("api/v1/application/")). + Post(afi.APIAddress("api/v1/application/")). JSON(`{"label_UID":"`+label.UID.String()+`"}`). BasicAuth("admin", afi.AdminToken()). Expect(t). @@ -105,7 +105,7 @@ drivers: t.Run("Application should have state NEW in 10 sec", func(t *testing.T) { apitest.New(). EnableNetworking(cli). - Get(afi.ApiAddress("api/v1/application/"+app.UID.String()+"/state")). + Get(afi.APIAddress("api/v1/application/"+app.UID.String()+"/state")). BasicAuth("admin", afi.AdminToken()). Expect(t). Status(http.StatusOK). @@ -122,7 +122,7 @@ drivers: t.Run("Application should have state NEW in 20 sec", func(t *testing.T) { apitest.New(). EnableNetworking(cli). - Get(afi.ApiAddress("api/v1/application/"+app.UID.String()+"/state")). + Get(afi.APIAddress("api/v1/application/"+app.UID.String()+"/state")). BasicAuth("admin", afi.AdminToken()). Expect(t). Status(http.StatusOK). @@ -139,7 +139,7 @@ drivers: t.Run("Application should have state NEW in 30 sec", func(t *testing.T) { apitest.New(). EnableNetworking(cli). - Get(afi.ApiAddress("api/v1/application/"+app.UID.String()+"/state")). + Get(afi.APIAddress("api/v1/application/"+app.UID.String()+"/state")). BasicAuth("admin", afi.AdminToken()). Expect(t). Status(http.StatusOK). @@ -156,7 +156,7 @@ drivers: t.Run("Application should have state NEW in 40 sec", func(t *testing.T) { apitest.New(). EnableNetworking(cli). - Get(afi.ApiAddress("api/v1/application/"+app.UID.String()+"/state")). + Get(afi.APIAddress("api/v1/application/"+app.UID.String()+"/state")). BasicAuth("admin", afi.AdminToken()). Expect(t). Status(http.StatusOK). @@ -171,7 +171,7 @@ drivers: t.Run("Deallocate the Application", func(t *testing.T) { apitest.New(). EnableNetworking(cli). - Get(afi.ApiAddress("api/v1/application/"+app.UID.String()+"/deallocate")). + Get(afi.APIAddress("api/v1/application/"+app.UID.String()+"/deallocate")). BasicAuth("admin", afi.AdminToken()). Expect(t). Status(http.StatusOK). 
@@ -182,7 +182,7 @@ drivers: h.Retry(&h.Timer{Timeout: 5 * time.Second, Wait: 1 * time.Second}, t, func(r *h.R) { apitest.New(). EnableNetworking(cli). - Get(afi.ApiAddress("api/v1/application/"+app.UID.String()+"/state")). + Get(afi.APIAddress("api/v1/application/"+app.UID.String()+"/state")). BasicAuth("admin", afi.AdminToken()). Expect(r). Status(http.StatusOK). @@ -198,7 +198,7 @@ drivers: t.Run("Create good Label with test/prod driver", func(t *testing.T) { apitest.New(). EnableNetworking(cli). - Post(afi.ApiAddress("api/v1/label/")). + Post(afi.APIAddress("api/v1/label/")). JSON(`{"name":"test-label", "version":2, "definitions": [{"driver":"test/prod", "resources":{"cpu":5,"ram":9}}]}`). BasicAuth("admin", afi.AdminToken()). Expect(t). @@ -214,7 +214,7 @@ drivers: t.Run("Create Application", func(t *testing.T) { apitest.New(). EnableNetworking(cli). - Post(afi.ApiAddress("api/v1/application/")). + Post(afi.APIAddress("api/v1/application/")). JSON(`{"label_UID":"`+label.UID.String()+`"}`). BasicAuth("admin", afi.AdminToken()). Expect(t). @@ -231,7 +231,7 @@ drivers: h.Retry(&h.Timer{Timeout: 10 * time.Second, Wait: 1 * time.Second}, t, func(r *h.R) { apitest.New(). EnableNetworking(cli). - Get(afi.ApiAddress("api/v1/application/"+app.UID.String()+"/state")). + Get(afi.APIAddress("api/v1/application/"+app.UID.String()+"/state")). BasicAuth("admin", afi.AdminToken()). Expect(r). Status(http.StatusOK). @@ -247,7 +247,7 @@ drivers: t.Run("Deallocate the Application", func(t *testing.T) { apitest.New(). EnableNetworking(cli). - Get(afi.ApiAddress("api/v1/application/"+app.UID.String()+"/deallocate")). + Get(afi.APIAddress("api/v1/application/"+app.UID.String()+"/deallocate")). BasicAuth("admin", afi.AdminToken()). Expect(t). Status(http.StatusOK). @@ -258,7 +258,7 @@ drivers: h.Retry(&h.Timer{Timeout: 10 * time.Second, Wait: 1 * time.Second}, t, func(r *h.R) { apitest.New(). EnableNetworking(cli). 
- Get(afi.ApiAddress("api/v1/application/"+app.UID.String()+"/state")). + Get(afi.APIAddress("api/v1/application/"+app.UID.String()+"/state")). BasicAuth("admin", afi.AdminToken()). Expect(r). Status(http.StatusOK). diff --git a/tests/node_filter_test.go b/tests/node_filter_test.go index 50220d3..b3750f0 100644 --- a/tests/node_filter_test.go +++ b/tests/node_filter_test.go @@ -65,7 +65,7 @@ drivers: t.Run("Create Label", func(t *testing.T) { apitest.New(). EnableNetworking(cli). - Post(afi.ApiAddress("api/v1/label/")). + Post(afi.APIAddress("api/v1/label/")). JSON(`{"name":"test-label", "version":1, "definitions": [{"driver":"test","resources":{"node_filter":["example:test"],"cpu":1,"ram":2}}]}`). BasicAuth("admin", afi.AdminToken()). Expect(t). @@ -82,7 +82,7 @@ drivers: t.Run("Create Application", func(t *testing.T) { apitest.New(). EnableNetworking(cli). - Post(afi.ApiAddress("api/v1/application/")). + Post(afi.APIAddress("api/v1/application/")). JSON(`{"label_UID":"`+label.UID.String()+`"}`). BasicAuth("admin", afi.AdminToken()). Expect(t). @@ -100,7 +100,7 @@ drivers: h.Retry(&h.Timer{Timeout: 10 * time.Second, Wait: 1 * time.Second}, t, func(r *h.R) { apitest.New(). EnableNetworking(cli). - Get(afi.ApiAddress("api/v1/application/"+app.UID.String()+"/state")). + Get(afi.APIAddress("api/v1/application/"+app.UID.String()+"/state")). BasicAuth("admin", afi.AdminToken()). Expect(r). Status(http.StatusOK). @@ -154,7 +154,7 @@ drivers: t.Run("Create Label", func(t *testing.T) { apitest.New(). EnableNetworking(cli). - Post(afi.ApiAddress("api/v1/label/")). + Post(afi.APIAddress("api/v1/label/")). JSON(`{"name":"test-label", "version":1, "definitions": [{"driver":"test","resources":{"node_filter":["example:test", "example2:test2"],"cpu":1,"ram":2}}]}`). BasicAuth("admin", afi.AdminToken()). Expect(t). @@ -171,7 +171,7 @@ drivers: t.Run("Create Application", func(t *testing.T) { apitest.New(). EnableNetworking(cli). - Post(afi.ApiAddress("api/v1/application/")). 
+ Post(afi.APIAddress("api/v1/application/")). JSON(`{"label_UID":"`+label.UID.String()+`"}`). BasicAuth("admin", afi.AdminToken()). Expect(t). @@ -189,7 +189,7 @@ drivers: h.Retry(&h.Timer{Timeout: 10 * time.Second, Wait: 1 * time.Second}, t, func(r *h.R) { apitest.New(). EnableNetworking(cli). - Get(afi.ApiAddress("api/v1/application/"+app.UID.String()+"/state")). + Get(afi.APIAddress("api/v1/application/"+app.UID.String()+"/state")). BasicAuth("admin", afi.AdminToken()). Expect(r). Status(http.StatusOK). @@ -242,7 +242,7 @@ drivers: t.Run("Create Label", func(t *testing.T) { apitest.New(). EnableNetworking(cli). - Post(afi.ApiAddress("api/v1/label/")). + Post(afi.APIAddress("api/v1/label/")). JSON(`{"name":"test-label", "version":1, "definitions": [{"driver":"test","resources":{"node_filter":["example:test", "wrong:notthesame"],"cpu":1,"ram":2}}]}`). BasicAuth("admin", afi.AdminToken()). Expect(t). @@ -259,7 +259,7 @@ drivers: t.Run("Create Application", func(t *testing.T) { apitest.New(). EnableNetworking(cli). - Post(afi.ApiAddress("api/v1/application/")). + Post(afi.APIAddress("api/v1/application/")). JSON(`{"label_UID":"`+label.UID.String()+`"}`). BasicAuth("admin", afi.AdminToken()). Expect(t). @@ -278,7 +278,7 @@ drivers: apitest.New(). EnableNetworking(cli). - Get(afi.ApiAddress("api/v1/application/"+app.UID.String()+"/state")). + Get(afi.APIAddress("api/v1/application/"+app.UID.String()+"/state")). BasicAuth("admin", afi.AdminToken()). Expect(t). Status(http.StatusOK). @@ -329,7 +329,7 @@ drivers: t.Run("Create Label", func(t *testing.T) { apitest.New(). EnableNetworking(cli). - Post(afi.ApiAddress("api/v1/label/")). + Post(afi.APIAddress("api/v1/label/")). JSON(`{"name":"test-label", "version":1, "definitions": [{"driver":"test","resources":{"node_filter":["example:test", "wrong:notthesame"],"cpu":1,"ram":2}}]}`). BasicAuth("admin", afi.AdminToken()). Expect(t). @@ -346,7 +346,7 @@ drivers: t.Run("Create Application", func(t *testing.T) { apitest.New(). 
EnableNetworking(cli). - Post(afi.ApiAddress("api/v1/application/")). + Post(afi.APIAddress("api/v1/application/")). JSON(`{"label_UID":"`+label.UID.String()+`"}`). BasicAuth("admin", afi.AdminToken()). Expect(t). @@ -365,7 +365,7 @@ drivers: apitest.New(). EnableNetworking(cli). - Get(afi.ApiAddress("api/v1/application/"+app.UID.String()+"/state")). + Get(afi.APIAddress("api/v1/application/"+app.UID.String()+"/state")). BasicAuth("admin", afi.AdminToken()). Expect(t). Status(http.StatusOK). diff --git a/tests/shutdown_mode_test.go b/tests/shutdown_mode_test.go index 031c9cf..6af3898 100644 --- a/tests/shutdown_mode_test.go +++ b/tests/shutdown_mode_test.go @@ -64,7 +64,7 @@ drivers: t.Run("Create Label", func(t *testing.T) { apitest.New(). EnableNetworking(cli). - Post(afi.ApiAddress("api/v1/label/")). + Post(afi.APIAddress("api/v1/label/")). JSON(`{"name":"test-label", "version":1, "definitions": [{"driver":"test", "resources":{"cpu":1,"ram":2}}]}`). BasicAuth("admin", afi.AdminToken()). Expect(t). @@ -81,7 +81,7 @@ drivers: t.Run("Create Application", func(t *testing.T) { apitest.New(). EnableNetworking(cli). - Post(afi.ApiAddress("api/v1/application/")). + Post(afi.APIAddress("api/v1/application/")). JSON(`{"label_UID":"`+label.UID.String()+`"}`). BasicAuth("admin", afi.AdminToken()). Expect(t). @@ -99,7 +99,7 @@ drivers: h.Retry(&h.Timer{Timeout: 10 * time.Second, Wait: 1 * time.Second}, t, func(r *h.R) { apitest.New(). EnableNetworking(cli). - Get(afi.ApiAddress("api/v1/application/"+app.UID.String()+"/state")). + Get(afi.APIAddress("api/v1/application/"+app.UID.String()+"/state")). BasicAuth("admin", afi.AdminToken()). Expect(r). Status(http.StatusOK). @@ -115,7 +115,7 @@ drivers: t.Run("Send maintenance + shutdown request", func(t *testing.T) { apitest.New(). EnableNetworking(cli). - Get(afi.ApiAddress("api/v1/node/this/maintenance")). + Get(afi.APIAddress("api/v1/node/this/maintenance")). Query("shutdown", "true"). BasicAuth("admin", afi.AdminToken()). 
Expect(t). @@ -133,7 +133,7 @@ drivers: t.Run("Deallocate the Application", func(t *testing.T) { apitest.New(). EnableNetworking(cli). - Get(afi.ApiAddress("api/v1/application/"+app.UID.String()+"/deallocate")). + Get(afi.APIAddress("api/v1/application/"+app.UID.String()+"/deallocate")). BasicAuth("admin", afi.AdminToken()). Expect(t). Status(http.StatusOK). @@ -185,7 +185,7 @@ drivers: t.Run("Create Label", func(t *testing.T) { apitest.New(). EnableNetworking(cli). - Post(afi.ApiAddress("api/v1/label/")). + Post(afi.APIAddress("api/v1/label/")). JSON(`{"name":"test-label", "version":1, "definitions": [{"driver":"test", "resources":{"cpu":1,"ram":2}}]}`). BasicAuth("admin", afi.AdminToken()). Expect(t). @@ -202,7 +202,7 @@ drivers: t.Run("Create Application", func(t *testing.T) { apitest.New(). EnableNetworking(cli). - Post(afi.ApiAddress("api/v1/application/")). + Post(afi.APIAddress("api/v1/application/")). JSON(`{"label_UID":"`+label.UID.String()+`"}`). BasicAuth("admin", afi.AdminToken()). Expect(t). @@ -220,7 +220,7 @@ drivers: h.Retry(&h.Timer{Timeout: 10 * time.Second, Wait: 1 * time.Second}, t, func(r *h.R) { apitest.New(). EnableNetworking(cli). - Get(afi.ApiAddress("api/v1/application/"+app.UID.String()+"/state")). + Get(afi.APIAddress("api/v1/application/"+app.UID.String()+"/state")). BasicAuth("admin", afi.AdminToken()). Expect(r). Status(http.StatusOK). @@ -236,7 +236,7 @@ drivers: t.Run("Send immediate shutdown request", func(t *testing.T) { apitest.New(). EnableNetworking(cli). - Get(afi.ApiAddress("api/v1/node/this/maintenance")). + Get(afi.APIAddress("api/v1/node/this/maintenance")). Query("enable", "false"). Query("shutdown", "true"). BasicAuth("admin", afi.AdminToken()). @@ -291,7 +291,7 @@ drivers: t.Run("Create Label", func(t *testing.T) { apitest.New(). EnableNetworking(cli). - Post(afi.ApiAddress("api/v1/label/")). + Post(afi.APIAddress("api/v1/label/")). 
JSON(`{"name":"test-label", "version":1, "definitions": [{"driver":"test", "resources":{"cpu":1,"ram":2}}]}`). BasicAuth("admin", afi.AdminToken()). Expect(t). @@ -308,7 +308,7 @@ drivers: t.Run("Create Application", func(t *testing.T) { apitest.New(). EnableNetworking(cli). - Post(afi.ApiAddress("api/v1/application/")). + Post(afi.APIAddress("api/v1/application/")). JSON(`{"label_UID":"`+label.UID.String()+`"}`). BasicAuth("admin", afi.AdminToken()). Expect(t). @@ -326,7 +326,7 @@ drivers: h.Retry(&h.Timer{Timeout: 10 * time.Second, Wait: 1 * time.Second}, t, func(r *h.R) { apitest.New(). EnableNetworking(cli). - Get(afi.ApiAddress("api/v1/application/"+app.UID.String()+"/state")). + Get(afi.APIAddress("api/v1/application/"+app.UID.String()+"/state")). BasicAuth("admin", afi.AdminToken()). Expect(r). Status(http.StatusOK). @@ -342,7 +342,7 @@ drivers: t.Run("Send immediate shutdown request with delay", func(t *testing.T) { apitest.New(). EnableNetworking(cli). - Get(afi.ApiAddress("api/v1/node/this/maintenance")). + Get(afi.APIAddress("api/v1/node/this/maintenance")). Query("enable", "false"). Query("shutdown", "true"). Query("shutdown_delay", "11s"). @@ -406,7 +406,7 @@ drivers: t.Run("Create Label", func(t *testing.T) { apitest.New(). EnableNetworking(cli). - Post(afi.ApiAddress("api/v1/label/")). + Post(afi.APIAddress("api/v1/label/")). JSON(`{"name":"test-label", "version":1, "definitions": [{"driver":"test", "resources":{"cpu":1,"ram":2}}]}`). BasicAuth("admin", afi.AdminToken()). Expect(t). @@ -423,7 +423,7 @@ drivers: t.Run("Create Application", func(t *testing.T) { apitest.New(). EnableNetworking(cli). - Post(afi.ApiAddress("api/v1/application/")). + Post(afi.APIAddress("api/v1/application/")). JSON(`{"label_UID":"`+label.UID.String()+`"}`). BasicAuth("admin", afi.AdminToken()). Expect(t). @@ -441,7 +441,7 @@ drivers: h.Retry(&h.Timer{Timeout: 10 * time.Second, Wait: 1 * time.Second}, t, func(r *h.R) { apitest.New(). EnableNetworking(cli). 
- Get(afi.ApiAddress("api/v1/application/"+app.UID.String()+"/state")). + Get(afi.APIAddress("api/v1/application/"+app.UID.String()+"/state")). BasicAuth("admin", afi.AdminToken()). Expect(r). Status(http.StatusOK). @@ -457,7 +457,7 @@ drivers: t.Run("Send maintenance + shutdown request", func(t *testing.T) { apitest.New(). EnableNetworking(cli). - Get(afi.ApiAddress("api/v1/node/this/maintenance")). + Get(afi.APIAddress("api/v1/node/this/maintenance")). Query("shutdown", "true"). BasicAuth("admin", afi.AdminToken()). Expect(t). @@ -475,7 +475,7 @@ drivers: t.Run("Send shutdown cancel request", func(t *testing.T) { apitest.New(). EnableNetworking(cli). - Get(afi.ApiAddress("api/v1/node/this/maintenance")). + Get(afi.APIAddress("api/v1/node/this/maintenance")). Query("enable", "false"). Query("shutdown", "false"). BasicAuth("admin", afi.AdminToken()). @@ -487,7 +487,7 @@ drivers: t.Run("Deallocate the Application", func(t *testing.T) { apitest.New(). EnableNetworking(cli). - Get(afi.ApiAddress("api/v1/application/"+app.UID.String()+"/deallocate")). + Get(afi.APIAddress("api/v1/application/"+app.UID.String()+"/deallocate")). BasicAuth("admin", afi.AdminToken()). Expect(t). Status(http.StatusOK). @@ -498,7 +498,7 @@ drivers: h.Retry(&h.Timer{Timeout: 10 * time.Second, Wait: 1 * time.Second}, t, func(r *h.R) { apitest.New(). EnableNetworking(cli). - Get(afi.ApiAddress("api/v1/application/"+app.UID.String()+"/state")). + Get(afi.APIAddress("api/v1/application/"+app.UID.String()+"/state")). BasicAuth("admin", afi.AdminToken()). Expect(r). Status(http.StatusOK). @@ -556,7 +556,7 @@ drivers: t.Run("Send shutdown request with delay", func(t *testing.T) { apitest.New(). EnableNetworking(cli). - Get(afi.ApiAddress("api/v1/node/this/maintenance")). + Get(afi.APIAddress("api/v1/node/this/maintenance")). Query("enable", "false"). Query("shutdown", "true"). Query("shutdown_delay", "15s"). 
@@ -576,7 +576,7 @@ drivers: t.Run("Send shutdown cancel request", func(t *testing.T) { apitest.New(). EnableNetworking(cli). - Get(afi.ApiAddress("api/v1/node/this/maintenance")). + Get(afi.APIAddress("api/v1/node/this/maintenance")). Query("enable", "false"). Query("shutdown", "false"). BasicAuth("admin", afi.AdminToken()). diff --git a/tests/simple_app_create_destroy_test.go b/tests/simple_app_create_destroy_test.go index 01dd316..c3ceebe 100644 --- a/tests/simple_app_create_destroy_test.go +++ b/tests/simple_app_create_destroy_test.go @@ -62,7 +62,7 @@ drivers: t.Run("Create Label", func(t *testing.T) { apitest.New(). EnableNetworking(cli). - Post(afi.ApiAddress("api/v1/label/")). + Post(afi.APIAddress("api/v1/label/")). JSON(`{"name":"test-label", "version":1, "definitions": [{"driver":"test", "resources":{"cpu":1,"ram":2}}]}`). BasicAuth("admin", afi.AdminToken()). Expect(t). @@ -79,7 +79,7 @@ drivers: t.Run("Create Application", func(t *testing.T) { apitest.New(). EnableNetworking(cli). - Post(afi.ApiAddress("api/v1/application/")). + Post(afi.APIAddress("api/v1/application/")). JSON(`{"label_UID":"`+label.UID.String()+`"}`). BasicAuth("admin", afi.AdminToken()). Expect(t). @@ -97,7 +97,7 @@ drivers: h.Retry(&h.Timer{Timeout: 10 * time.Second, Wait: 1 * time.Second}, t, func(r *h.R) { apitest.New(). EnableNetworking(cli). - Get(afi.ApiAddress("api/v1/application/"+app.UID.String()+"/state")). + Get(afi.APIAddress("api/v1/application/"+app.UID.String()+"/state")). BasicAuth("admin", afi.AdminToken()). Expect(r). Status(http.StatusOK). @@ -114,7 +114,7 @@ drivers: t.Run("Resource should be created", func(t *testing.T) { apitest.New(). EnableNetworking(cli). - Get(afi.ApiAddress("api/v1/application/"+app.UID.String()+"/resource")). + Get(afi.APIAddress("api/v1/application/"+app.UID.String()+"/resource")). BasicAuth("admin", afi.AdminToken()). Expect(t). Status(http.StatusOK). 
@@ -129,7 +129,7 @@ drivers: t.Run("Deallocate the Application", func(t *testing.T) { apitest.New(). EnableNetworking(cli). - Get(afi.ApiAddress("api/v1/application/"+app.UID.String()+"/deallocate")). + Get(afi.APIAddress("api/v1/application/"+app.UID.String()+"/deallocate")). BasicAuth("admin", afi.AdminToken()). Expect(t). Status(http.StatusOK). @@ -140,7 +140,7 @@ drivers: h.Retry(&h.Timer{Timeout: 10 * time.Second, Wait: 1 * time.Second}, t, func(r *h.R) { apitest.New(). EnableNetworking(cli). - Get(afi.ApiAddress("api/v1/application/"+app.UID.String()+"/state")). + Get(afi.APIAddress("api/v1/application/"+app.UID.String()+"/state")). BasicAuth("admin", afi.AdminToken()). Expect(r). Status(http.StatusOK). diff --git a/tests/three_apps_with_limit_fish_restart_test.go b/tests/three_apps_with_limit_fish_restart_test.go index 22a24e6..b5f8a65 100644 --- a/tests/three_apps_with_limit_fish_restart_test.go +++ b/tests/three_apps_with_limit_fish_restart_test.go @@ -67,7 +67,7 @@ drivers: t.Run("Create Label", func(t *testing.T) { apitest.New(). EnableNetworking(cli). - Post(afi.ApiAddress("api/v1/label/")). + Post(afi.APIAddress("api/v1/label/")). JSON(`{"name":"test-label", "version":1, "definitions": [{"driver":"test", "resources":{"cpu":2,"ram":4}}]}`). BasicAuth("admin", afi.AdminToken()). Expect(t). @@ -84,7 +84,7 @@ drivers: t.Run("Create Application 1", func(t *testing.T) { apitest.New(). EnableNetworking(cli). - Post(afi.ApiAddress("api/v1/application/")). + Post(afi.APIAddress("api/v1/application/")). JSON(`{"label_UID":"`+label.UID.String()+`"}`). BasicAuth("admin", afi.AdminToken()). Expect(t). @@ -101,7 +101,7 @@ drivers: t.Run("Create Application 2", func(t *testing.T) { apitest.New(). EnableNetworking(cli). - Post(afi.ApiAddress("api/v1/application/")). + Post(afi.APIAddress("api/v1/application/")). JSON(`{"label_UID":"`+label.UID.String()+`"}`). BasicAuth("admin", afi.AdminToken()). Expect(t). 
@@ -118,7 +118,7 @@ drivers: t.Run("Create Application 3", func(t *testing.T) { apitest.New(). EnableNetworking(cli). - Post(afi.ApiAddress("api/v1/application/")). + Post(afi.APIAddress("api/v1/application/")). JSON(`{"label_UID":"`+label.UID.String()+`"}`). BasicAuth("admin", afi.AdminToken()). Expect(t). @@ -136,7 +136,7 @@ drivers: h.Retry(&h.Timer{Timeout: 10 * time.Second, Wait: 1 * time.Second}, t, func(r *h.R) { apitest.New(). EnableNetworking(cli). - Get(afi.ApiAddress("api/v1/application/"+app1.UID.String()+"/state")). + Get(afi.APIAddress("api/v1/application/"+app1.UID.String()+"/state")). BasicAuth("admin", afi.AdminToken()). Expect(r). Status(http.StatusOK). @@ -153,7 +153,7 @@ drivers: h.Retry(&h.Timer{Timeout: 10 * time.Second, Wait: 1 * time.Second}, t, func(r *h.R) { apitest.New(). EnableNetworking(cli). - Get(afi.ApiAddress("api/v1/application/"+app2.UID.String()+"/state")). + Get(afi.APIAddress("api/v1/application/"+app2.UID.String()+"/state")). BasicAuth("admin", afi.AdminToken()). Expect(r). Status(http.StatusOK). @@ -169,7 +169,7 @@ drivers: t.Run("Application 3 should have state NEW", func(t *testing.T) { apitest.New(). EnableNetworking(cli). - Get(afi.ApiAddress("api/v1/application/"+app3.UID.String()+"/state")). + Get(afi.APIAddress("api/v1/application/"+app3.UID.String()+"/state")). BasicAuth("admin", afi.AdminToken()). Expect(t). Status(http.StatusOK). @@ -189,7 +189,7 @@ drivers: t.Run("Application 1 should be ALLOCATED right after restart", func(t *testing.T) { apitest.New(). EnableNetworking(cli). - Get(afi.ApiAddress("api/v1/application/"+app1.UID.String()+"/state")). + Get(afi.APIAddress("api/v1/application/"+app1.UID.String()+"/state")). BasicAuth("admin", afi.AdminToken()). Expect(t). Status(http.StatusOK). @@ -204,7 +204,7 @@ drivers: t.Run("Application 2 should be ALLOCATED right after restart", func(t *testing.T) { apitest.New(). EnableNetworking(cli). - Get(afi.ApiAddress("api/v1/application/"+app2.UID.String()+"/state")). 
+ Get(afi.APIAddress("api/v1/application/"+app2.UID.String()+"/state")). BasicAuth("admin", afi.AdminToken()). Expect(t). Status(http.StatusOK). @@ -219,7 +219,7 @@ drivers: t.Run("Application 3 still should have state NEW", func(t *testing.T) { apitest.New(). EnableNetworking(cli). - Get(afi.ApiAddress("api/v1/application/"+app3.UID.String()+"/state")). + Get(afi.APIAddress("api/v1/application/"+app3.UID.String()+"/state")). BasicAuth("admin", afi.AdminToken()). Expect(t). Status(http.StatusOK). @@ -234,7 +234,7 @@ drivers: t.Run("Deallocate the Application 1", func(t *testing.T) { apitest.New(). EnableNetworking(cli). - Get(afi.ApiAddress("api/v1/application/"+app1.UID.String()+"/deallocate")). + Get(afi.APIAddress("api/v1/application/"+app1.UID.String()+"/deallocate")). BasicAuth("admin", afi.AdminToken()). Expect(t). Status(http.StatusOK). @@ -244,7 +244,7 @@ drivers: t.Run("Deallocate the Application 2", func(t *testing.T) { apitest.New(). EnableNetworking(cli). - Get(afi.ApiAddress("api/v1/application/"+app2.UID.String()+"/deallocate")). + Get(afi.APIAddress("api/v1/application/"+app2.UID.String()+"/deallocate")). BasicAuth("admin", afi.AdminToken()). Expect(t). Status(http.StatusOK). @@ -255,7 +255,7 @@ drivers: h.Retry(&h.Timer{Timeout: 10 * time.Second, Wait: 1 * time.Second}, t, func(r *h.R) { apitest.New(). EnableNetworking(cli). - Get(afi.ApiAddress("api/v1/application/"+app1.UID.String()+"/state")). + Get(afi.APIAddress("api/v1/application/"+app1.UID.String()+"/state")). BasicAuth("admin", afi.AdminToken()). Expect(r). Status(http.StatusOK). @@ -272,7 +272,7 @@ drivers: h.Retry(&h.Timer{Timeout: 10 * time.Second, Wait: 1 * time.Second}, t, func(r *h.R) { apitest.New(). EnableNetworking(cli). - Get(afi.ApiAddress("api/v1/application/"+app2.UID.String()+"/state")). + Get(afi.APIAddress("api/v1/application/"+app2.UID.String()+"/state")). BasicAuth("admin", afi.AdminToken()). Expect(r). Status(http.StatusOK). 
@@ -289,7 +289,7 @@ drivers: h.Retry(&h.Timer{Timeout: 40 * time.Second, Wait: 5 * time.Second}, t, func(r *h.R) { apitest.New(). EnableNetworking(cli). - Get(afi.ApiAddress("api/v1/application/"+app3.UID.String()+"/state")). + Get(afi.APIAddress("api/v1/application/"+app3.UID.String()+"/state")). BasicAuth("admin", afi.AdminToken()). Expect(r). Status(http.StatusOK). @@ -305,7 +305,7 @@ drivers: t.Run("Deallocate the Application 3", func(t *testing.T) { apitest.New(). EnableNetworking(cli). - Get(afi.ApiAddress("api/v1/application/"+app3.UID.String()+"/deallocate")). + Get(afi.APIAddress("api/v1/application/"+app3.UID.String()+"/deallocate")). BasicAuth("admin", afi.AdminToken()). Expect(t). Status(http.StatusOK). @@ -316,7 +316,7 @@ drivers: h.Retry(&h.Timer{Timeout: 10 * time.Second, Wait: 1 * time.Second}, t, func(r *h.R) { apitest.New(). EnableNetworking(cli). - Get(afi.ApiAddress("api/v1/application/"+app3.UID.String()+"/state")). + Get(afi.APIAddress("api/v1/application/"+app3.UID.String()+"/state")). BasicAuth("admin", afi.AdminToken()). Expect(r). Status(http.StatusOK). diff --git a/tests/three_apps_with_limit_test.go b/tests/three_apps_with_limit_test.go index 829449a..d758559 100644 --- a/tests/three_apps_with_limit_test.go +++ b/tests/three_apps_with_limit_test.go @@ -65,7 +65,7 @@ drivers: t.Run("Create Label", func(t *testing.T) { apitest.New(). EnableNetworking(cli). - Post(afi.ApiAddress("api/v1/label/")). + Post(afi.APIAddress("api/v1/label/")). JSON(`{"name":"test-label", "version":1, "definitions": [{"driver":"test", "resources":{"cpu":2,"ram":4}}]}`). BasicAuth("admin", afi.AdminToken()). Expect(t). @@ -82,7 +82,7 @@ drivers: t.Run("Create Application 1", func(t *testing.T) { apitest.New(). EnableNetworking(cli). - Post(afi.ApiAddress("api/v1/application/")). + Post(afi.APIAddress("api/v1/application/")). JSON(`{"label_UID":"`+label.UID.String()+`"}`). BasicAuth("admin", afi.AdminToken()). Expect(t). 
@@ -99,7 +99,7 @@ drivers: t.Run("Create Application 2", func(t *testing.T) { apitest.New(). EnableNetworking(cli). - Post(afi.ApiAddress("api/v1/application/")). + Post(afi.APIAddress("api/v1/application/")). JSON(`{"label_UID":"`+label.UID.String()+`"}`). BasicAuth("admin", afi.AdminToken()). Expect(t). @@ -116,7 +116,7 @@ drivers: t.Run("Create Application 3", func(t *testing.T) { apitest.New(). EnableNetworking(cli). - Post(afi.ApiAddress("api/v1/application/")). + Post(afi.APIAddress("api/v1/application/")). JSON(`{"label_UID":"`+label.UID.String()+`"}`). BasicAuth("admin", afi.AdminToken()). Expect(t). @@ -134,7 +134,7 @@ drivers: h.Retry(&h.Timer{Timeout: 10 * time.Second, Wait: 1 * time.Second}, t, func(r *h.R) { apitest.New(). EnableNetworking(cli). - Get(afi.ApiAddress("api/v1/application/"+app1.UID.String()+"/state")). + Get(afi.APIAddress("api/v1/application/"+app1.UID.String()+"/state")). BasicAuth("admin", afi.AdminToken()). Expect(r). Status(http.StatusOK). @@ -151,7 +151,7 @@ drivers: h.Retry(&h.Timer{Timeout: 10 * time.Second, Wait: 1 * time.Second}, t, func(r *h.R) { apitest.New(). EnableNetworking(cli). - Get(afi.ApiAddress("api/v1/application/"+app2.UID.String()+"/state")). + Get(afi.APIAddress("api/v1/application/"+app2.UID.String()+"/state")). BasicAuth("admin", afi.AdminToken()). Expect(r). Status(http.StatusOK). @@ -167,7 +167,7 @@ drivers: t.Run("Application 3 should have state NEW", func(t *testing.T) { apitest.New(). EnableNetworking(cli). - Get(afi.ApiAddress("api/v1/application/"+app3.UID.String()+"/state")). + Get(afi.APIAddress("api/v1/application/"+app3.UID.String()+"/state")). BasicAuth("admin", afi.AdminToken()). Expect(t). Status(http.StatusOK). @@ -182,7 +182,7 @@ drivers: t.Run("Deallocate the Application 1", func(t *testing.T) { apitest.New(). EnableNetworking(cli). - Get(afi.ApiAddress("api/v1/application/"+app1.UID.String()+"/deallocate")). + Get(afi.APIAddress("api/v1/application/"+app1.UID.String()+"/deallocate")). 
BasicAuth("admin", afi.AdminToken()). Expect(t). Status(http.StatusOK). @@ -192,7 +192,7 @@ drivers: t.Run("Deallocate the Application 2", func(t *testing.T) { apitest.New(). EnableNetworking(cli). - Get(afi.ApiAddress("api/v1/application/"+app2.UID.String()+"/deallocate")). + Get(afi.APIAddress("api/v1/application/"+app2.UID.String()+"/deallocate")). BasicAuth("admin", afi.AdminToken()). Expect(t). Status(http.StatusOK). @@ -203,7 +203,7 @@ drivers: h.Retry(&h.Timer{Timeout: 10 * time.Second, Wait: 1 * time.Second}, t, func(r *h.R) { apitest.New(). EnableNetworking(cli). - Get(afi.ApiAddress("api/v1/application/"+app1.UID.String()+"/state")). + Get(afi.APIAddress("api/v1/application/"+app1.UID.String()+"/state")). BasicAuth("admin", afi.AdminToken()). Expect(r). Status(http.StatusOK). @@ -220,7 +220,7 @@ drivers: h.Retry(&h.Timer{Timeout: 10 * time.Second, Wait: 1 * time.Second}, t, func(r *h.R) { apitest.New(). EnableNetworking(cli). - Get(afi.ApiAddress("api/v1/application/"+app2.UID.String()+"/state")). + Get(afi.APIAddress("api/v1/application/"+app2.UID.String()+"/state")). BasicAuth("admin", afi.AdminToken()). Expect(r). Status(http.StatusOK). @@ -237,7 +237,7 @@ drivers: h.Retry(&h.Timer{Timeout: 40 * time.Second, Wait: 5 * time.Second}, t, func(r *h.R) { apitest.New(). EnableNetworking(cli). - Get(afi.ApiAddress("api/v1/application/"+app3.UID.String()+"/state")). + Get(afi.APIAddress("api/v1/application/"+app3.UID.String()+"/state")). BasicAuth("admin", afi.AdminToken()). Expect(r). Status(http.StatusOK). @@ -253,7 +253,7 @@ drivers: t.Run("Deallocate the Application 3", func(t *testing.T) { apitest.New(). EnableNetworking(cli). - Get(afi.ApiAddress("api/v1/application/"+app3.UID.String()+"/deallocate")). + Get(afi.APIAddress("api/v1/application/"+app3.UID.String()+"/deallocate")). BasicAuth("admin", afi.AdminToken()). Expect(t). Status(http.StatusOK). 
@@ -264,7 +264,7 @@ drivers: h.Retry(&h.Timer{Timeout: 10 * time.Second, Wait: 1 * time.Second}, t, func(r *h.R) { apitest.New(). EnableNetworking(cli). - Get(afi.ApiAddress("api/v1/application/"+app3.UID.String()+"/state")). + Get(afi.APIAddress("api/v1/application/"+app3.UID.String()+"/state")). BasicAuth("admin", afi.AdminToken()). Expect(r). Status(http.StatusOK). diff --git a/tests/two_apps_with_limit_test.go b/tests/two_apps_with_limit_test.go index b5ddea8..7d4484d 100644 --- a/tests/two_apps_with_limit_test.go +++ b/tests/two_apps_with_limit_test.go @@ -67,7 +67,7 @@ drivers: t.Run("Create Label", func(t *testing.T) { apitest.New(). EnableNetworking(cli). - Post(afi.ApiAddress("api/v1/label/")). + Post(afi.APIAddress("api/v1/label/")). JSON(`{"name":"test-label", "version":1, "definitions": [{"driver":"test", "resources":{"cpu":4,"ram":8}}]}`). BasicAuth("admin", afi.AdminToken()). Expect(t). @@ -84,7 +84,7 @@ drivers: t.Run("Create Application 1", func(t *testing.T) { apitest.New(). EnableNetworking(cli). - Post(afi.ApiAddress("api/v1/application/")). + Post(afi.APIAddress("api/v1/application/")). JSON(`{"label_UID":"`+label.UID.String()+`"}`). BasicAuth("admin", afi.AdminToken()). Expect(t). @@ -101,7 +101,7 @@ drivers: t.Run("Create Application 2", func(t *testing.T) { apitest.New(). EnableNetworking(cli). - Post(afi.ApiAddress("api/v1/application/")). + Post(afi.APIAddress("api/v1/application/")). JSON(`{"label_UID":"`+label.UID.String()+`"}`). BasicAuth("admin", afi.AdminToken()). Expect(t). @@ -119,7 +119,7 @@ drivers: h.Retry(&h.Timer{Timeout: 10 * time.Second, Wait: 1 * time.Second}, t, func(r *h.R) { apitest.New(). EnableNetworking(cli). - Get(afi.ApiAddress("api/v1/application/"+app1.UID.String()+"/state")). + Get(afi.APIAddress("api/v1/application/"+app1.UID.String()+"/state")). BasicAuth("admin", afi.AdminToken()). Expect(r). Status(http.StatusOK). 
@@ -135,7 +135,7 @@ drivers: t.Run("Application 2 should have state NEW", func(t *testing.T) { apitest.New(). EnableNetworking(cli). - Get(afi.ApiAddress("api/v1/application/"+app2.UID.String()+"/state")). + Get(afi.APIAddress("api/v1/application/"+app2.UID.String()+"/state")). BasicAuth("admin", afi.AdminToken()). Expect(t). Status(http.StatusOK). @@ -151,7 +151,7 @@ drivers: t.Run("Resource 1 should be created", func(t *testing.T) { apitest.New(). EnableNetworking(cli). - Get(afi.ApiAddress("api/v1/application/"+app1.UID.String()+"/resource")). + Get(afi.APIAddress("api/v1/application/"+app1.UID.String()+"/resource")). BasicAuth("admin", afi.AdminToken()). Expect(t). Status(http.StatusOK). @@ -166,7 +166,7 @@ drivers: t.Run("Deallocate the Application 1", func(t *testing.T) { apitest.New(). EnableNetworking(cli). - Get(afi.ApiAddress("api/v1/application/"+app1.UID.String()+"/deallocate")). + Get(afi.APIAddress("api/v1/application/"+app1.UID.String()+"/deallocate")). BasicAuth("admin", afi.AdminToken()). Expect(t). Status(http.StatusOK). @@ -177,7 +177,7 @@ drivers: h.Retry(&h.Timer{Timeout: 10 * time.Second, Wait: 1 * time.Second}, t, func(r *h.R) { apitest.New(). EnableNetworking(cli). - Get(afi.ApiAddress("api/v1/application/"+app1.UID.String()+"/state")). + Get(afi.APIAddress("api/v1/application/"+app1.UID.String()+"/state")). BasicAuth("admin", afi.AdminToken()). Expect(r). Status(http.StatusOK). @@ -194,7 +194,7 @@ drivers: h.Retry(&h.Timer{Timeout: 40 * time.Second, Wait: 5 * time.Second}, t, func(r *h.R) { apitest.New(). EnableNetworking(cli). - Get(afi.ApiAddress("api/v1/application/"+app2.UID.String()+"/state")). + Get(afi.APIAddress("api/v1/application/"+app2.UID.String()+"/state")). BasicAuth("admin", afi.AdminToken()). Expect(r). Status(http.StatusOK). @@ -210,7 +210,7 @@ drivers: t.Run("Resource 2 should be created", func(t *testing.T) { apitest.New(). EnableNetworking(cli). - Get(afi.ApiAddress("api/v1/application/"+app2.UID.String()+"/resource")). 
+ Get(afi.APIAddress("api/v1/application/"+app2.UID.String()+"/resource")). BasicAuth("admin", afi.AdminToken()). Expect(t). Status(http.StatusOK). @@ -225,7 +225,7 @@ drivers: t.Run("Deallocate the Application 2", func(t *testing.T) { apitest.New(). EnableNetworking(cli). - Get(afi.ApiAddress("api/v1/application/"+app2.UID.String()+"/deallocate")). + Get(afi.APIAddress("api/v1/application/"+app2.UID.String()+"/deallocate")). BasicAuth("admin", afi.AdminToken()). Expect(t). Status(http.StatusOK). @@ -236,7 +236,7 @@ drivers: h.Retry(&h.Timer{Timeout: 10 * time.Second, Wait: 1 * time.Second}, t, func(r *h.R) { apitest.New(). EnableNetworking(cli). - Get(afi.ApiAddress("api/v1/application/"+app2.UID.String()+"/state")). + Get(afi.APIAddress("api/v1/application/"+app2.UID.String()+"/state")). BasicAuth("admin", afi.AdminToken()). Expect(r). Status(http.StatusOK). diff --git a/tests/yaml_label_create_test.go b/tests/yaml_label_create_test.go index 0f92519..42517c7 100644 --- a/tests/yaml_label_create_test.go +++ b/tests/yaml_label_create_test.go @@ -61,7 +61,7 @@ drivers: t.Run("Create & check YAML Label", func(t *testing.T) { apitest.New(). EnableNetworking(cli). - Post(afi.ApiAddress("api/v1/label/")). + Post(afi.APIAddress("api/v1/label/")). Header("Content-Type", "application/yaml"). 
Body(`--- name: test-label From 3dc66fddfc65e3f71c87ba901c0474e83f8191d3 Mon Sep 17 00:00:00 2001 From: Sergei Parshev Date: Sat, 7 Sep 2024 01:09:00 -0400 Subject: [PATCH 5/6] Fixed panic in maintenance shutdown tests --- lib/fish/fish.go | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/lib/fish/fish.go b/lib/fish/fish.go index 4ce469c..72dfb71 100644 --- a/lib/fish/fish.go +++ b/lib/fish/fish.go @@ -854,13 +854,10 @@ func (f *Fish) activateShutdown() { // Running the main shutdown routine go func() { fireShutdown := make(chan bool, 1) - var delayTickerReport *time.Ticker - var delayTimer *time.Timer + delayTickerReport := &time.Ticker{} + delayTimer := &time.Timer{} var delayEndTime time.Time - defer delayTickerReport.Stop() - defer delayTimer.Stop() - for { select { case <-f.shutdownCancel: @@ -873,6 +870,10 @@ func (f *Fish) activateShutdown() { delayEndTime = time.Now().Add(f.shutdownDelay) delayTickerReport = time.NewTicker(30 * time.Second) delayTimer = time.NewTimer(f.shutdownDelay) + + // Those defers will be executed just once, so no issues with loop & defer + defer delayTickerReport.Stop() //nolint:revive + defer delayTimer.Stop() //nolint:revive } else { // No delay is needed, so shutdown now fireShutdown <- true From 23beddcdeba2c8b4212d98e7bc5519576143930e Mon Sep 17 00:00:00 2001 From: Sergei Parshev Date: Sat, 7 Sep 2024 10:04:22 -0400 Subject: [PATCH 6/6] Fixed last integration test and added some details on lints & benchmarks --- README.md | 28 +++++++++++++++++++ .../application_task_notexisting_fail_test.go | 6 ++-- 2 files changed, 32 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 67d20d5..8f96326 100644 --- a/README.md +++ b/README.md @@ -217,6 +217,15 @@ Is relatively easy - you change logic, you run `./build.sh` to create a binary, the PR when you think it's perfect enough. 
That will be great if you can ask in the discussions or create an issue on GitHub to align with the current direction and the plans. +### Linting + +Fish uses golangci-lint to execute a huge number of static checks and you can run it locally like: +```sh +$ golangci-lint run -v +``` + +It uses the configuration from .golangci.yml file. + ### Integration tests To verify that everything works as expected you can run integration tests like that: @@ -224,6 +233,25 @@ To verify that everything works as expected you can run integration tests like t $ FISH_PATH=$PWD/aquarium-fish.darwin_amd64 go test -v -failfast -parallel 4 ./tests/... ``` +### Benchmarks + +Fish contains a few benchmarks to make sure the performance of the node & cluster will be stable. +You can run them locally like that: +```sh +$ go test -bench . -benchmem '-run=^#' -cpu 1,2 ./... +goos: darwin +goarch: amd64 +pkg: github.com/adobe/aquarium-fish/lib/crypt +cpu: Intel(R) Core(TM) i9-9880H CPU @ 2.30GHz +Benchmark_hash_new 20 65924472 ns/op 67122440 B/op 180 allocs/op +Benchmark_hash_new-2 33 34709165 ns/op 67122834 B/op 181 allocs/op +Benchmark_hash_isequal 33 64242662 ns/op 67122424 B/op 179 allocs/op +Benchmark_hash_isequal-2 32 34741325 ns/op 67122526 B/op 179 allocs/op$ +``` + +CI stores the previous results in branch gh-pages in json format. Unfortunately GitHub actions +workers perfromance is not stable, so it's recommended to execute the benchmarks on standaline. + ### Profiling Is available through pprof like that: diff --git a/tests/application_task_notexisting_fail_test.go b/tests/application_task_notexisting_fail_test.go index 7589eeb..a22966b 100644 --- a/tests/application_task_notexisting_fail_test.go +++ b/tests/application_task_notexisting_fail_test.go @@ -108,7 +108,7 @@ drivers: }) var appTask types.ApplicationTask - t.Run("Create ApplicationTask Snapshot", func(t *testing.T) { + t.Run("Create unavailable ApplicationTask", func(t *testing.T) { apitest.New(). EnableNetworking(cli). 
Post(afi.APIAddress("api/v1/application/"+app.UID.String()+"/task/")). @@ -119,6 +119,8 @@ drivers: End(). JSON(&appTask) + // ApplicationTask will be created anyway even with wrong name, because input Fish node could + // not be able to validate it, since could have different config or lack of enabled drivers if appTask.UID == uuid.Nil { t.Fatalf("ApplicationTask UID is incorrect: %v", appTask.UID) } @@ -142,7 +144,7 @@ drivers: if appTasks[0].UID != appTask.UID { r.Fatalf("ApplicationTask UID is incorrect: %v != %v", appTasks[0].UID, appTask.UID) } - if string(appTasks[0].Result) != `{"error":"task not availble in driver"}` { + if string(appTasks[0].Result) != `{"error":"task not available in driver"}` { r.Fatalf("ApplicationTask result is incorrect: %v", appTasks[0].Result) } })