Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

feat: [GCP Batch] Support passing standard machine types to the Google backend #1

Merged
merged 2 commits into from
Oct 10, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,8 @@ be found [here](https://cromwell.readthedocs.io/en/stable/backends/HPC/#optional
### GCP Batch

- The `genomics` configuration entry was renamed to `batch`, see [ReadTheDocs](https://cromwell.readthedocs.io/en/stable/backends/GCPBatch/) for more information.
- Fixes a bug that prevented jobs from being recovered on Cromwell restart.
- Fixes machine type selection to match the Google Cloud Life Sciences backend, including default n1 non-shared-core machine types and correct handling of `cpuPlatform` to select n2 or n2d machine types as appropriate.
- Fixes the preemption error handling: the correct error message is now printed, and other potential exit codes are also handled.
- Fixes pulling Docker image metadata from private GCR repositories.
- Fixes `google_project` and `google_compute_service_account` workflow options not taking effect when using the GCP Batch backend.
Expand Down
1 change: 1 addition & 0 deletions build.sbt
Original file line number Diff line number Diff line change
Expand Up @@ -237,6 +237,7 @@ lazy val googlePipelinesV2Beta = (project in backendRoot / "google" / "pipelines

lazy val googleBatch = (project in backendRoot / "google" / "batch")
.withLibrarySettings("cromwell-google-batch-backend")
.dependsOn(core)
.dependsOn(backend)
.dependsOn(gcsFileSystem)
.dependsOn(drsFileSystem)
Expand Down
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
name: papi_cpu_platform
testFormat: workflowsuccess
backends: [Papiv2]
backendsMode: any
backends: [Papiv2, GCPBATCH]

files {
workflow: papi_cpu_platform/papi_cpu_platform.wdl
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -53,7 +53,7 @@ trait BatchApiRunCreationClient { this: Actor with ActorLogging with BatchInstru
backendSingletonActor ! BatchApiRequestManager.BatchRunCreationRequest(
request.workflowId,
self,
requestFactory.submitRequest(request)
requestFactory.submitRequest(request, jobLogger)
)
val newPromise = Promise[StandardAsyncJob]()
runCreationClientPromise = Option(newPromise)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -6,13 +6,14 @@ import cromwell.backend.google.batch.io.GcpBatchAttachedDisk
import cromwell.backend.google.batch.models.GcpBatchConfigurationAttributes.VirtualPrivateCloudConfiguration
import cromwell.backend.google.batch.models._
import cromwell.backend.google.batch.monitoring.{CheckpointingConfiguration, MonitoringImage}
import cromwell.core.logging.JobLogger
import cromwell.core.path.Path
import wom.runtime.WomOutputRuntimeExtractor

import scala.concurrent.duration.FiniteDuration

trait GcpBatchRequestFactory {
def submitRequest(data: GcpBatchRequest): CreateJobRequest
def submitRequest(data: GcpBatchRequest, jobLogger: JobLogger): CreateJobRequest

def queryRequest(jobName: JobName): GetJobRequest

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,8 @@ import cromwell.backend.google.batch.io.GcpBatchAttachedDisk
import cromwell.backend.google.batch.models.GcpBatchConfigurationAttributes.GcsTransferConfiguration
import cromwell.backend.google.batch.models.{GcpBatchRequest, VpcAndSubnetworkProjectLabelValues}
import cromwell.backend.google.batch.runnable._
import cromwell.backend.google.batch.util.BatchUtilityConversions
import cromwell.backend.google.batch.util.{BatchUtilityConversions, GcpBatchMachineConstraints}
import cromwell.core.logging.JobLogger

import scala.jdk.CollectionConverters._

Expand Down Expand Up @@ -74,14 +75,16 @@ class GcpBatchRequestFactoryImpl()(implicit gcsTransferConfiguration: GcsTransfe
private def createInstancePolicy(cpuPlatform: String,
spotModel: ProvisioningModel,
accelerators: Option[Accelerator.Builder],
attachedDisks: List[AttachedDisk]
attachedDisks: List[AttachedDisk],
machineType: String
): InstancePolicy.Builder = {

// set GPU count to 0 if not included in workflow
val gpuAccelerators = accelerators.getOrElse(Accelerator.newBuilder.setCount(0).setType("")) // TODO: Driver version

val instancePolicy = InstancePolicy.newBuilder
.setProvisioningModel(spotModel)
.setMachineType(machineType)
.addAllDisks(attachedDisks.asJava)
.setMinCpuPlatform(cpuPlatform)
.buildPartial()
Expand Down Expand Up @@ -154,7 +157,7 @@ class GcpBatchRequestFactoryImpl()(implicit gcsTransferConfiguration: GcsTransfe
}
}

override def submitRequest(data: GcpBatchRequest): CreateJobRequest = {
override def submitRequest(data: GcpBatchRequest, jobLogger: JobLogger): CreateJobRequest = {

val runtimeAttributes = data.gcpBatchParameters.runtimeAttributes
val createParameters = data.createParameters
Expand Down Expand Up @@ -224,7 +227,15 @@ class GcpBatchRequestFactoryImpl()(implicit gcsTransferConfiguration: GcsTransfe
val computeResource = createComputeResource(cpuCores, memory, gcpBootDiskSizeMb)
val taskSpec = createTaskSpec(sortedRunnables, computeResource, retryCount, durationInSeconds, allVolumes)
val taskGroup: TaskGroup = createTaskGroup(taskCount, taskSpec)
val instancePolicy = createInstancePolicy(cpuPlatform, spotModel, accelerators, allDisks)
val machineType = GcpBatchMachineConstraints.machineType(runtimeAttributes.memory,
runtimeAttributes.cpu,
cpuPlatformOption = runtimeAttributes.cpuPlatform,
standardMachineTypeOption = runtimeAttributes.standardMachineType,
googleLegacyMachineSelection = false,
jobLogger = jobLogger
)
val instancePolicy =
createInstancePolicy(cpuPlatform = cpuPlatform, spotModel, accelerators, allDisks, machineType = machineType)
val locationPolicy = LocationPolicy.newBuilder.addAllowedLocations(zones).build
val allocationPolicy =
createAllocationPolicy(data, locationPolicy, instancePolicy.build, networkPolicy, gcpSa, accelerators)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,8 @@ import wom.format.MemorySize

import scala.math.{log, pow}

/** Wraps a predefined (standard) Google Cloud machine type name (e.g. "n1-standard-4")
  * supplied via the `standardMachineType` runtime attribute, as an alternative to a
  * custom machine type derived from cpu/memory.
  *
  * @param machineType the literal machine type string passed through to the Batch API
  */
final case class StandardMachineType(machineType: String)

/**
* Adjusts memory and cpu for custom machine types.
*
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -49,7 +49,8 @@ final case class GcpBatchRuntimeAttributes(cpu: Int Refined Positive,
continueOnReturnCode: ContinueOnReturnCode,
noAddress: Boolean,
useDockerImageCache: Option[Boolean],
checkpointFilename: Option[String]
checkpointFilename: Option[String],
standardMachineType: Option[String]
)

object GcpBatchRuntimeAttributes {
Expand Down Expand Up @@ -77,13 +78,16 @@ object GcpBatchRuntimeAttributes {
private val cpuPlatformValidationInstance = new StringRuntimeAttributesValidation(CpuPlatformKey).optional
// via `gcloud compute zones describe us-central1-a`
val CpuPlatformIntelCascadeLakeValue = "Intel Cascade Lake"
val CpuPlatformIntelIceLakeValue = "Intel Ice Lake"
val CpuPlatformAMDRomeValue = "AMD Rome"

val UseDockerImageCacheKey = "useDockerImageCache"
private val useDockerImageCacheValidationInstance = new BooleanRuntimeAttributesValidation(
UseDockerImageCacheKey
).optional

val StandardMachineTypeKey = "standardMachineType"

val CheckpointFileKey = "checkpointFile"
private val checkpointFileValidationInstance = new StringRuntimeAttributesValidation(CheckpointFileKey).optional

Expand All @@ -97,6 +101,8 @@ object GcpBatchRuntimeAttributes {
)
private def cpuPlatformValidation(runtimeConfig: Option[Config]): OptionalRuntimeAttributesValidation[String] =
cpuPlatformValidationInstance
private def standardMachineTypeValidation(runtimeConfig: Option[Config]): OptionalRuntimeAttributesValidation[String] =
new StringRuntimeAttributesValidation(StandardMachineTypeKey).optional
private def gpuTypeValidation(runtimeConfig: Option[Config]): OptionalRuntimeAttributesValidation[GpuType] =
GpuTypeValidation.optional

Expand Down Expand Up @@ -170,7 +176,8 @@ object GcpBatchRuntimeAttributes {
bootDiskSizeValidation(runtimeConfig),
useDockerImageCacheValidation(runtimeConfig),
checkpointFileValidationInstance,
dockerValidation
dockerValidation,
standardMachineTypeValidation(runtimeConfig)
)
}

Expand Down Expand Up @@ -227,6 +234,10 @@ object GcpBatchRuntimeAttributes {
useDockerImageCacheValidation(runtimeAttrsConfig).key,
validatedRuntimeAttributes
)
val standardMachineType: Option[String] = RuntimeAttributesValidation.extractOption(
standardMachineTypeValidation(runtimeAttrsConfig).key,
validatedRuntimeAttributes
)

new GcpBatchRuntimeAttributes(
cpu = cpu,
Expand All @@ -242,7 +253,8 @@ object GcpBatchRuntimeAttributes {
continueOnReturnCode = continueOnReturnCode,
noAddress = noAddress,
useDockerImageCache = useDockerImageCache,
checkpointFilename = checkpointFileName
checkpointFilename = checkpointFileName,
standardMachineType = standardMachineType
)
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -4,29 +4,34 @@ import cromwell.backend.google.batch.models.{
GcpBatchRuntimeAttributes,
N1CustomMachineType,
N2CustomMachineType,
N2DCustomMachineType
N2DCustomMachineType,
StandardMachineType
}
import cromwell.core.logging.JobLogger
import eu.timepit.refined.api.Refined
import eu.timepit.refined.numeric.Positive
import org.slf4j.Logger
import wdl4s.parser.MemoryUnit
import wom.format.MemorySize

object GcpBatchMachineConstraints {
def machineType(memory: MemorySize,
cpu: Int Refined Positive,
cpuPlatformOption: Option[String],
standardMachineTypeOption: Option[String],
googleLegacyMachineSelection: Boolean,
jobLogger: Logger
jobLogger: JobLogger
): String =
if (googleLegacyMachineSelection) {
if (standardMachineTypeOption.exists(_.trim.nonEmpty)) {
StandardMachineType(standardMachineTypeOption.get).machineType
} else if (googleLegacyMachineSelection) {
s"predefined-$cpu-${memory.to(MemoryUnit.MB).amount.intValue()}"
} else {
// If someone requests Intel Cascade Lake as their CPU platform then switch the machine type to n2.
// If someone requests Intel Cascade Lake or Intel Ice Lake as their CPU platform then switch the machine type to n2.
// Similarly, CPU platform of AMD Rome corresponds to the machine type n2d.
val customMachineType =
cpuPlatformOption match {
case Some(GcpBatchRuntimeAttributes.CpuPlatformIntelCascadeLakeValue) => N2CustomMachineType
case Some(GcpBatchRuntimeAttributes.CpuPlatformIntelIceLakeValue) => N2CustomMachineType
case Some(GcpBatchRuntimeAttributes.CpuPlatformAMDRomeValue) => N2DCustomMachineType
case _ => N1CustomMachineType
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -131,7 +131,7 @@ class GcpBatchAsyncBackendJobExecutionActorSpec
val runtimeAttributesBuilder = GcpBatchRuntimeAttributes.runtimeAttributesBuilder(configuration)

val requestFactory: GcpBatchRequestFactory = new GcpBatchRequestFactory {
override def submitRequest(data: GcpBatchRequest): CreateJobRequest = null
override def submitRequest(data: GcpBatchRequest, jobLogger: JobLogger): CreateJobRequest = null

override def queryRequest(jobName: JobName): GetJobRequest = null

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -286,7 +286,8 @@ trait GcpBatchRuntimeAttributesSpecsMixin {
continueOnReturnCode = ContinueOnReturnCodeSet(Set(0)),
noAddress = false,
useDockerImageCache = None,
checkpointFilename = None
checkpointFilename = None,
standardMachineType = None
)

def assertBatchRuntimeAttributesSuccessfulCreation(runtimeAttributes: Map[String, WomValue],
Expand Down
Loading
Loading