
Can't do rootless workspaces with coder and envbuilder in k8s cluster #181

Closed as not planned

Description

@shiipou

I want to create a rootless workspace with Coder and envbuilder in my homelab Kubernetes cluster, but I hit the following error and can't figure out how to get past it.

error: compile devcontainer.json: create features directory: mkdir /.envbuilder/features: permission denied

At first it looks like it can't find the .devcontainer folder containing the devcontainer.json file, but the error is actually a permission failure when creating /.envbuilder/features.

It works fine if I don't set the user (so it runs as root).

Full logs
envbuilder - Build development environments from repositories in a container
#1: 📦 Cloning https://github.com/shiipou/simplon-2024-brief-06-TooManyChoco to /workspace...
#1: Enumerating objects: 27, done.
#1: Counting objects: 100% (27/27), done.
#1: Compressing objects: 100% (19/19)
#1: Total 27 (delta 8), reused 16 (delta 4), pack-reused 0
#1: 📦 Cloned repository! [459.376322ms]
error: compile devcontainer.json: create features directory: mkdir /.envbuilder/features: permission denied
envbuilder - Build development environments from repositories in a container
#1: 📦 Cloning https://github.com/shiipou/simplon-2024-brief-06-TooManyChoco to /workspace...
#1: 📦 The repository already exists! [104.122µs]
error: compile devcontainer.json: create features directory: mkdir /.envbuilder/features: permission denied
envbuilder - Build development environments from repositories in a container

Here is the rootless template I wrote:

terraform {
  required_providers {
    coder = {
      source = "coder/coder"
    }
    kubernetes = {
      source = "hashicorp/kubernetes"
    }
  }
}

provider "coder" {
}

variable "use_kubeconfig" {
  type        = bool
  description = <<-EOF
  Use host kubeconfig? (true/false)

  Set this to false if the Coder host is itself running as a Pod on the same
  Kubernetes cluster as you are deploying workspaces to.

  Set this to true if the Coder host is running outside the Kubernetes cluster
  for workspaces.  A valid "~/.kube/config" must be present on the Coder host.
  EOF
  default     = false
}

variable "namespace" {
  type        = string
  description = "The Kubernetes namespace to create workspaces in (must exist prior to creating workspaces). If the Coder host is itself running as a Pod on the same Kubernetes cluster as you are deploying workspaces to, set this to the same namespace."
}

data "coder_parameter" "repo" {
  name         = "repo"
  display_name = "Repository (auto)"
  order        = 1
  description  = "Select a repository to automatically clone and start working with a devcontainer."
  mutable      = true
  option {
    name        = "vercel/next.js"
    description = "The React Framework"
    value       = "https://github.com/vercel/next.js"
  }
  option {
    name        = "Dev-Container's Images"
    description = "To use it, choose the children directory with `devcontainer_dir` to `src/java/.devcontainer` or to the image you want."
    value       = "https://github.com/devcontainers/images/"
  }
  option {
    name        = "Custom"
    icon        = "/emojis/1f5c3.png"
    description = "Specify a custom repo URL below"
    value       = "custom"
  }
}

data "coder_parameter" "custom_repo_url" {
  name         = "custom_repo"
  display_name = "Repository URL (custom)"
  order        = 2
  default      = ""
  description  = "Optionally enter a custom repository URL, see [awesome-devcontainers](https://github.com/manekinekko/awesome-devcontainers)."
  mutable      = true
}

data "coder_parameter" "devcontainer_dir" {
  name         = "devcontainer_dir"
  display_name = "Dev Container directory"
  order        = 3
  default      = ".devcontainer"
  description  = "Optionnally enter a custom path for the .devcontainer directory. Useful if the template is in a subfolder of the repository (e.g: https://github.com/devcontainers/images/ has devcontainer_dir to `src/java/.devcontainer`)"
  mutable      = true
}

data "coder_parameter" "cpu" {
  name         = "cpu"
  display_name = "CPU"
  order        = 4
  description  = "The number of CPU cores"
  default      = "2"
  icon         = "/icon/memory.svg"
  mutable      = true
  option {
    name  = "2 Cores"
    value = "2"
  }
  option {
    name  = "4 Cores"
    value = "4"
  }
  option {
    name  = "6 Cores"
    value = "6"
  }
  option {
    name  = "8 Cores"
    value = "8"
  }
}

data "coder_parameter" "memory" {
  name         = "memory"
  display_name = "Memory"
  order        = 5
  description  = "The amount of memory in GB"
  default      = "2"
  icon         = "/icon/memory.svg"
  mutable      = true
  option {
    name  = "2 GB"
    value = "2"
  }
  option {
    name  = "4 GB"
    value = "4"
  }
  option {
    name  = "6 GB"
    value = "6"
  }
  option {
    name  = "8 GB"
    value = "8"
  }
}

data "coder_parameter" "home_disk_size" {
  name         = "home_disk_size"
  display_name = "Home disk size"
  order        = 6
  description  = "The size of the home disk in GB"
  default      = "10"
  type         = "number"
  icon         = "/emojis/1f4be.png"
  mutable      = false
  validation {
    min = 1
    max = 99999
  }
}

provider "kubernetes" {
  # Authenticate via ~/.kube/config or a Coder-specific ServiceAccount, depending on admin preferences
  config_path = var.use_kubeconfig == true ? "~/.kube/config" : null
}

data "coder_provisioner" "me" {}
data "coder_workspace" "me" {}

resource "coder_agent" "main" {
  os             = "linux"
  arch           = data.coder_provisioner.me.arch
  startup_script = <<-EOT
    set -e

    # install and start code-server
    curl -fsSL https://code-server.dev/install.sh | sh -s -- --method=standalone --prefix=/tmp/code-server --version 4.11.0
    /tmp/code-server/bin/code-server --auth none --port 13337 >/tmp/code-server.log 2>&1 &
  EOT
  dir            = "/workspace"

  # The following metadata blocks are optional. They are used to display
  # information about your workspace in the dashboard. You can remove them
  # if you don't want to display any information.
  # For basic resources, you can use the `coder stat` command.
  # If you need more control, you can write your own script.
  metadata {
    display_name = "CPU Usage"
    key          = "0_cpu_usage"
    script       = "coder stat cpu"
    interval     = 10
    timeout      = 1
  }

  metadata {
    display_name = "RAM Usage"
    key          = "1_ram_usage"
    script       = "coder stat mem"
    interval     = 10
    timeout      = 1
  }

  metadata {
    display_name = "Home Disk"
    key          = "3_home_disk"
    script       = "coder stat disk --path $${HOME}"
    interval     = 60
    timeout      = 1
  }

  metadata {
    display_name = "CPU Usage (Host)"
    key          = "4_cpu_usage_host"
    script       = "coder stat cpu --host"
    interval     = 10
    timeout      = 1
  }

  metadata {
    display_name = "Memory Usage (Host)"
    key          = "5_mem_usage_host"
    script       = "coder stat mem --host"
    interval     = 10
    timeout      = 1
  }

  metadata {
    display_name = "Load Average (Host)"
    key          = "6_load_host"
    # get load avg scaled by number of cores
    script   = <<EOT
      echo "`cat /proc/loadavg | awk '{ print $1 }'` `nproc`" | awk '{ printf "%0.2f", $1/$2 }'
    EOT
    interval = 60
    timeout  = 1
  }
}

# code-server
resource "coder_app" "code-server" {
  agent_id     = coder_agent.main.id
  slug         = "code-server"
  display_name = "code-server"
  icon         = "/icon/code.svg"
  url          = "http://localhost:13337?folder=/workspace"
  subdomain    = false
  share        = "owner"

  healthcheck {
    url       = "http://localhost:13337/healthz"
    interval  = 3
    threshold = 10
  }
}

resource "kubernetes_persistent_volume_claim" "workspace" {
  metadata {
    name      = "coder-${lower(data.coder_workspace.me.owner)}-${lower(data.coder_workspace.me.name)}-workspace"
    namespace = var.namespace
    labels = {
      "app.kubernetes.io/name"     = "coder-pvc"
      "app.kubernetes.io/instance" = "coder-pvc-${lower(data.coder_workspace.me.owner)}-${lower(data.coder_workspace.me.name)}"
      "app.kubernetes.io/part-of"  = "coder"
      //Coder-specific labels.
      "com.coder.resource"       = "true"
      "com.coder.workspace.id"   = data.coder_workspace.me.id
      "com.coder.workspace.name" = data.coder_workspace.me.name
      "com.coder.user.id"        = data.coder_workspace.me.owner_id
      "com.coder.user.username"  = data.coder_workspace.me.owner
    }
    annotations = {
      "com.coder.user.email" = data.coder_workspace.me.owner_email
    }
  }
  wait_until_bound = false
  spec {
    access_modes = ["ReadWriteOnce"]
    resources {
      requests = {
        storage = "${data.coder_parameter.home_disk_size.value}Gi"
      }
    }
  }
}

resource "kubernetes_deployment" "main" {
  count = 1
  depends_on = [
    kubernetes_persistent_volume_claim.workspace
  ]
  wait_for_rollout = false
  metadata {
    name      = "coder-${lower(data.coder_workspace.me.owner)}-${lower(data.coder_workspace.me.name)}"
    namespace = var.namespace
    labels = {
      "app.kubernetes.io/name"     = "coder-workspace"
      "app.kubernetes.io/instance" = "coder-workspace-${lower(data.coder_workspace.me.owner)}-${lower(data.coder_workspace.me.name)}"
      "app.kubernetes.io/part-of"  = "coder"
      "com.coder.resource"         = "true"
      "com.coder.workspace.id"     = data.coder_workspace.me.id
      "com.coder.workspace.name"   = data.coder_workspace.me.name
      "com.coder.user.id"          = data.coder_workspace.me.owner_id
      "com.coder.user.username"    = data.coder_workspace.me.owner
    }
    annotations = {
      "com.coder.user.email" = data.coder_workspace.me.owner_email
    }
  }

  spec {
    replicas = data.coder_workspace.me.start_count
    selector {
      match_labels = {
        "app.kubernetes.io/name" = "coder-workspace"
        "com.coder.workspace.id" = data.coder_workspace.me.id
      }
    }
    strategy {
      type = "Recreate"
    }

    template {
      metadata {
        labels = {
          "app.kubernetes.io/name" = "coder-workspace"
          "com.coder.workspace.id" = data.coder_workspace.me.id
        }
      }
      spec {
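        # Pod-level security context: run the whole pod as an unprivileged
        # user/group (UID/GID 1000). This is the "rootless" part of the template.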
        security_context {
          run_as_user  = 1000
          run_as_group = 1000
          fs_group     = 1000
        }
        container {
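          # envbuilder container: clones GIT_URL into /workspace, builds the
          # devcontainer defined there, then runs the Coder agent init script.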
          name              = "dev"
          image             = "ghcr.io/coder/envbuilder:0.2.9"
          image_pull_policy = "Always"

          security_context {
            run_as_user = "1000"
          }
          
          env {
            name  = "CODER_AGENT_TOKEN"
            value = coder_agent.main.token
          }
          env {
            name  = "CODER_AGENT_URL"
            value = replace(data.coder_workspace.me.access_url, "/localhost|127\\.0\\.0\\.1/", "host.docker.internal")
          }
          env {
            name  = "GIT_URL"
            value = data.coder_parameter.repo.value == "custom" ? data.coder_parameter.custom_repo_url.value : data.coder_parameter.repo.value
          }
          env {
            name  = "DEVCONTAINER_DIR"
            value = data.coder_parameter.devcontainer_dir.value
          }
          env {
            name  = "INIT_SCRIPT"
            value = replace(coder_agent.main.init_script, "/localhost|127\\.0\\.0\\.1/", "host.docker.internal")
          }
          env {
            name = "WORKSPACE_FOLDER"
            value = "/workspace"
          }
          env {
            name  = "EXIT_ON_BUILD_FAILURE"
            value = "true"
          }
          env {
            name = "DOCKER_HOST"
            value = "tcp://localhost:2375"
          }

          resources {
            requests = {
              "cpu"    = "250m"
              "memory" = "512Mi"
            }
            limits = {
              "cpu"    = "${data.coder_parameter.cpu.value}"
              "memory" = "${data.coder_parameter.memory.value}Gi"
            }
          }
          volume_mount {
            mount_path = "/home/vscode"
            name       = "workspace"
            read_only  = false
            sub_path    = "home"
          }
          volume_mount {
            mount_path = "/workspace"
            name       = "workspace"
            read_only  = false
            sub_path    = "workspace"
          }
        }

        container {
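          # Docker-in-Docker sidecar: runs a Docker daemon on tcp://127.0.0.1:2375
          # so the dev container can reach it via DOCKER_HOST. Note this sidecar
          # still runs privileged and as root.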
          name = "docker-dind"
          image = "docker:dind"
          security_context {
            run_as_user = "0"
            privileged = true
          }
          command = ["dockerd", "--host", "tcp://127.0.0.1:2375"]
          volume_mount {
            name       = "workspace"
            mount_path = "/var/lib/docker"
            sub_path    = "docker"
          }
        }

        volume {
          name = "workspace"
          persistent_volume_claim {
            claim_name = kubernetes_persistent_volume_claim.workspace.metadata.0.name
            read_only  = false
          }
        }

        affinity {
          // This affinity attempts to spread out all workspace pods evenly across
          // nodes.
          pod_anti_affinity {
            preferred_during_scheduling_ignored_during_execution {
              weight = 1
              pod_affinity_term {
                topology_key = "kubernetes.io/hostname"
                label_selector {
                  match_expressions {
                    key      = "app.kubernetes.io/name"
                    operator = "In"
                    values   = ["coder-workspace"]
                  }
                }
              }
            }
          }
        }
      }
    }
  }
}
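
A possible workaround I have not verified yet (so treat it as a sketch, not a confirmed fix): the error suggests envbuilder tries to create /.envbuilder/features at the image root, which the UID 1000 user cannot write to. Mounting a writable emptyDir at that path, in the same style as the volume_mount/volume blocks above, might let the rootless build get further:

# Hypothetical addition inside the "dev" container; the volume name is illustrative.
volume_mount {
  mount_path = "/.envbuilder"
  name       = "envbuilder"
  read_only  = false
}

# Hypothetical addition next to the existing "workspace" volume in the pod spec.
volume {
  name = "envbuilder"
  empty_dir {}
}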
